Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/8390/8390.c1
-rw-r--r--drivers/net/ethernet/8390/8390p.c1
-rw-r--r--drivers/net/ethernet/8390/apne.c1
-rw-r--r--drivers/net/ethernet/8390/hydra.c1
-rw-r--r--drivers/net/ethernet/8390/stnic.c1
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c1
-rw-r--r--drivers/net/ethernet/adi/Kconfig1
-rw-r--r--drivers/net/ethernet/amazon/ena/Makefile2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c50
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c693
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h99
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_xdp.c468
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_xdp.h151
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c64
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c12
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c46
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h2
-rw-r--r--drivers/net/ethernet/amd/pds_core/debugfs.c4
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c16
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/fw.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c32
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c33
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c31
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c32
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c74
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h23
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c23
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c2
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.h8
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c6
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2682
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h503
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c740
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h521
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c39
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.h15
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c42
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c28
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c31
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/adapter.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c24
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c25
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.c3
-rw-r--r--drivers/net/ethernet/cortina/gemini.c15
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c28
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c33
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c132
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c31
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c18
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c1
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c48
-rw-r--r--drivers/net/ethernet/google/gve/gve.h8
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c88
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c17
-rw-r--r--drivers/net/ethernet/google/gve/gve_register.h9
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c25
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c37
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c82
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c40
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/Kconfig11
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c46
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h20
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c20
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c24
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c22
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c26
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h164
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c229
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c214
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c287
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debug.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c304
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c804
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h69
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c139
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h51
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c119
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h5
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c86
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.h7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c42
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c101
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_fdir.c3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c27
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c21
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c41
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c100
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c330
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c667
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c123
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c568
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c482
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c470
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hwmon.c126
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hwmon.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h412
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c372
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c336
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_osdep.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c319
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c444
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c195
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c100
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c30
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h7
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c53
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c67
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c7
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c70
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c4
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c29
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c19
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c8
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c17
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c44
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c48
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c33
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h21
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c33
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c88
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c11
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c50
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c38
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c175
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c100
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c113
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h43
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c52
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c168
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c27
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c1
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c97
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c25
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c129
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c84
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c925
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_config.h48
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c86
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h173
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c241
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h65
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c449
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h167
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h13
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h416
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c12
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h34
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c5
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h99
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h74
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c82
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c726
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c137
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c96
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c127
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c10
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c10
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c853
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c44
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c35
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_lag.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c81
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c1
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c51
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c73
-rw-r--r--drivers/net/ethernet/neterion/s2io.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c46
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c200
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c537
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c90
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c50
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c74
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c33
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_fw.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c115
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c47
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c32
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c12
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c2
-rw-r--r--drivers/net/ethernet/realtek/Kconfig7
-rw-r--r--drivers/net/ethernet/realtek/Makefile3
-rw-r--r--drivers/net/ethernet/realtek/r8169.h7
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c3
-rw-r--r--drivers/net/ethernet/realtek/r8169_leds.c157
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c216
-rw-r--r--drivers/net/ethernet/renesas/Kconfig12
-rw-r--r--drivers/net/ethernet/renesas/Makefile5
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c24
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c40
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h9
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c381
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h43
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c126
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h13
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c26
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c30
-rw-r--r--drivers/net/ethernet/sfc/ptp.h7
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c126
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h13
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c30
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h7
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h59
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c137
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c117
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c165
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c195
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c320
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c8
-rw-r--r--drivers/net/ethernet/ti/Kconfig15
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c272
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c283
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h9
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c708
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h186
-rw-r--r--drivers/net/ethernet/ti/cpsw.c17
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c18
-rw-r--r--drivers/net/ethernet/ti/cpts.c17
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c16
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c177
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h30
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c238
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h27
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c275
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c155
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h94
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c82
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c86
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c114
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h1
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h7
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c82
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c63
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c57
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h15
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h35
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c667
502 files changed, 24084 insertions, 10263 deletions
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 0e0aa4016858..c5636245f1ca 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
module_init(ns8390_module_init);
module_exit(ns8390_module_exit);
#endif /* MODULE */
+MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6834742057b3..6d429b11e9c6 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
module_init(NS8390p_init_module);
module_exit(NS8390p_cleanup_module);
+MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index a09f383dd249..828edca8d30c 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -610,4 +610,5 @@ static int init_pcmcia(void)
return 1;
}
+MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 24f49a8ff903..fd9dcdc356e6 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
module_init(hydra_init_module);
module_exit(hydra_cleanup_module);
+MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 265976e3b64a..6cc0e190aa79 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
module_init(stnic_probe);
module_exit(stnic_cleanup);
+MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index d70390e9d03d..c24dd4fe7a10 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
module_init(zorro8390_init_module);
module_exit(zorro8390_cleanup_module);
+MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
MODULE_LICENSE("GPL");
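
The six 8390 patches above all follow the same one-line pattern: each module gains a MODULE_DESCRIPTION() next to its existing MODULE_LICENSE(), so modinfo reports a meaningful description and builds that warn about modules lacking one stay quiet. A minimal sketch of the resulting tail of such a file (the example_* names are illustrative, not taken from any one of the files above):

/* Sketch only: typical module metadata block after this series. */
module_init(example_init_module);
module_exit(example_cleanup_module);

MODULE_DESCRIPTION("Example 8390-based ethernet driver");
MODULE_LICENSE("GPL");
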
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index da3bdd302502..760a9a60bc15 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -21,6 +21,7 @@ config ADIN1110
tristate "Analog Devices ADIN1110 MAC-PHY"
depends on SPI && NET_SWITCHDEV
select CRC8
+ select PHYLIB
help
Say yes here to build support for Analog Devices ADIN1110
Low Power 10BASE-T1L Ethernet MAC-PHY.
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index f1f752a8f7bb..6ab615365172 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index d671df4b76bc..0cb6cc1cef56 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include "ena_netdev.h"
+#include "ena_xdp.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
@@ -262,17 +263,14 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
ena_stats->name);
}
- if (!is_xdp) {
- /* RX stats, in XDP there isn't a RX queue
- * counterpart
- */
- for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
- ena_stats = &ena_stats_rx_strings[j];
+ /* In XDP there isn't an RX queue counterpart */
+ if (is_xdp)
+ continue;
- ethtool_sprintf(data,
- "queue_%u_rx_%s", i,
- ena_stats->name);
- }
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ ethtool_sprintf(data, "queue_%u_rx_%s", i, ena_stats->name);
}
}
}
@@ -299,13 +297,13 @@ static void ena_get_strings(struct ena_adapter *adapter,
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
- ethtool_sprintf(&data, ena_stats->name);
+ ethtool_puts(&data, ena_stats->name);
}
if (eni_stats_needed) {
for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
ena_stats = &ena_stats_eni_strings[i];
- ethtool_sprintf(&data, ena_stats->name);
+ ethtool_puts(&data, ena_stats->name);
}
}
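
The string-table hunk above switches from ethtool_sprintf() to ethtool_puts() for strings that need no formatting. Assuming the ethtool_puts() helper used here takes (u8 **data, const char *str), as the call sites suggest, the two lines below are equivalent, with the second skipping the needless format pass:

/* Sketch: both append one ETH_GSTRING_LEN-sized string and advance *data. */
ethtool_sprintf(&data, "%s", ena_stats->name);	/* old style */
ethtool_puts(&data, ena_stats->name);		/* preferred for plain strings */
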
@@ -802,15 +800,15 @@ static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
return rc;
}
-static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ena_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ena_adapter *adapter = netdev_priv(netdev);
enum ena_admin_hash_functions ena_func;
u8 func;
int rc;
- rc = ena_indirection_table_get(adapter, indir);
+ rc = ena_indirection_table_get(adapter, rxfh->indir);
if (rc)
return rc;
@@ -825,7 +823,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return rc;
}
- rc = ena_com_get_hash_key(adapter->ena_dev, key);
+ rc = ena_com_get_hash_key(adapter->ena_dev, rxfh->key);
if (rc)
return rc;
@@ -842,27 +840,27 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return -EOPNOTSUPP;
}
- if (hfunc)
- *hfunc = func;
+ rxfh->hfunc = func;
return 0;
}
-static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ena_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_hash_functions func = 0;
int rc;
- if (indir) {
- rc = ena_indirection_table_set(adapter, indir);
+ if (rxfh->indir) {
+ rc = ena_indirection_table_set(adapter, rxfh->indir);
if (rc)
return rc;
}
- switch (hfunc) {
+ switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
func = ena_com_get_current_hash_function(ena_dev);
break;
@@ -874,12 +872,12 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
break;
default:
netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
- hfunc);
+ rxfh->hfunc);
return -EOPNOTSUPP;
}
- if (key || func) {
- rc = ena_com_fill_hash_function(ena_dev, func, key,
+ if (rxfh->key || func) {
+ rc = ena_com_fill_hash_function(ena_dev, func, rxfh->key,
ENA_HASH_KEY_SIZE,
0xFFFFFFFF);
if (unlikely(rc)) {
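
The ena_get_rxfh()/ena_set_rxfh() hunks are part of the tree-wide conversion (visible across the ethtool files in the diffstat above) to the consolidated struct ethtool_rxfh_param, which carries the indirection table, hash key and hash function in one argument and gives .set_rxfh an extack for error reporting. A minimal sketch of a driver using the new callbacks; only the rxfh->indir/key/hfunc fields shown in the hunks above are assumed, and the example_copy_* helpers are hypothetical:

static int example_get_rxfh(struct net_device *dev,
			    struct ethtool_rxfh_param *rxfh)
{
	/* Fill only what the device supports; NULL members are ignored. */
	if (rxfh->indir)
		example_copy_indir_table(dev, rxfh->indir);	/* hypothetical helper */
	if (rxfh->key)
		example_copy_hash_key(dev, rxfh->key);		/* hypothetical helper */
	rxfh->hfunc = ETH_RSS_HASH_TOP;
	return 0;
}

static int example_set_rxfh(struct net_device *dev,
			    struct ethtool_rxfh_param *rxfh,
			    struct netlink_ext_ack *extack)
{
	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
		NL_SET_ERR_MSG_MOD(extack, "Only Toeplitz hashing is supported");
		return -EOPNOTSUPP;
	}
	/* ...program the indirection table and key as before... */
	return 0;
}
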
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c44c44e26ddf..1c0a7828d397 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -19,8 +19,8 @@
#include <net/ip.h>
#include "ena_netdev.h"
-#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
+#include "ena_xdp.h"
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
@@ -45,53 +45,6 @@ static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
-static void ena_init_io_rings(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
- int count);
-static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
- int count);
-static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index,
- int count);
-static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
-static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
-static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
-static void ena_napi_disable_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_napi_enable_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static int ena_up(struct ena_adapter *adapter);
-static void ena_down(struct ena_adapter *adapter);
-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring);
-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring);
-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info);
-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-
-/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
-static void ena_increase_stat(u64 *statp, u64 cnt,
- struct u64_stats_sync *syncp)
-{
- u64_stats_update_begin(syncp);
- (*statp) += cnt;
- u64_stats_update_end(syncp);
-}
-
-static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
-{
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
-}
-
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
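
The forward declarations and the ena_increase_stat()/ena_ring_tx_doorbell() helpers removed here do not disappear: with the XDP datapath split out, they must be visible to both ena_netdev.c and the new ena_xdp.c, so they presumably move into a shared header (the diffstat shows ena_netdev.h growing by 99 lines). A sketch of the shared inline helpers, with bodies taken from the removed code above; the header placement is an assumption:

/* Assumed to live in ena_netdev.h after the split (placement not shown
 * in this hunk). Bodies match the functions removed above.
 */
static inline void ena_increase_stat(u64 *statp, u64 cnt,
				     struct u64_stats_sync *syncp)
{
	u64_stats_update_begin(syncp);
	(*statp) += cnt;
	u64_stats_update_end(syncp);
}

static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
{
	ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
	ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
}
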
@@ -135,19 +88,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
-static int ena_xmit_common(struct net_device *dev,
- struct ena_ring *ring,
- struct ena_tx_buffer *tx_info,
- struct ena_com_tx_ctx *ena_tx_ctx,
- u16 next_to_use,
- u32 bytes)
+int ena_xmit_common(struct ena_adapter *adapter,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes)
{
- struct ena_adapter *adapter = netdev_priv(dev);
int rc, nb_hw_desc;
if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
ena_tx_ctx))) {
- netif_dbg(adapter, tx_queued, dev,
+ netif_dbg(adapter, tx_queued, adapter->netdev,
"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
ring->qid);
ena_ring_tx_doorbell(ring);
@@ -162,7 +114,7 @@ static int ena_xmit_common(struct net_device *dev,
* ena_com_prepare_tx() are fatal and therefore require a device reset.
*/
if (unlikely(rc)) {
- netif_err(adapter, tx_queued, dev,
+ netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n");
ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
&ring->syncp);
@@ -178,6 +130,7 @@ static int ena_xmit_common(struct net_device *dev,
u64_stats_update_end(&ring->syncp);
tx_info->tx_descs = nb_hw_desc;
+ tx_info->total_tx_size = bytes;
tx_info->last_jiffies = jiffies;
tx_info->print_once = 0;
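
Two related changes land in ena_xmit_common() in the hunks above: it loses its static qualifier and takes the adapter directly instead of deriving it from a net_device (so ena_xdp.c can call it for XDP frames that have no skb), and it now records the transmitted byte count in tx_info->total_tx_size, which the completion path later in this patch uses instead of skb->len. The exported prototype, copied from the hunk and presumably declared in ena_xdp.h:

/* Assumed shared declaration for ena_xdp.c (header location is an
 * assumption; the signature is taken from the hunk above).
 */
int ena_xmit_common(struct ena_adapter *adapter,
		    struct ena_ring *ring,
		    struct ena_tx_buffer *tx_info,
		    struct ena_com_tx_ctx *ena_tx_ctx,
		    u16 next_to_use,
		    u32 bytes);
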
@@ -186,467 +139,6 @@ static int ena_xmit_common(struct net_device *dev,
return 0;
}
-/* This is the XDP napi callback. XDP queues use a separate napi callback
- * than Rx/Tx queues.
- */
-static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
-{
- struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
- u32 xdp_work_done, xdp_budget;
- struct ena_ring *xdp_ring;
- int napi_comp_call = 0;
- int ret;
-
- xdp_ring = ena_napi->xdp_ring;
-
- xdp_budget = budget;
-
- if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
- test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
- napi_complete_done(napi, 0);
- return 0;
- }
-
- xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
-
- /* If the device is about to reset or down, avoid unmask
- * the interrupt and return 0 so NAPI won't reschedule
- */
- if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
- napi_complete_done(napi, 0);
- ret = 0;
- } else if (xdp_budget > xdp_work_done) {
- napi_comp_call = 1;
- if (napi_complete_done(napi, xdp_work_done))
- ena_unmask_interrupt(xdp_ring, NULL);
- ena_update_ring_numa_node(xdp_ring, NULL);
- ret = xdp_work_done;
- } else {
- ret = xdp_budget;
- }
-
- u64_stats_update_begin(&xdp_ring->syncp);
- xdp_ring->tx_stats.napi_comp += napi_comp_call;
- xdp_ring->tx_stats.tx_poll++;
- u64_stats_update_end(&xdp_ring->syncp);
- xdp_ring->tx_stats.last_napi_jiffies = jiffies;
-
- return ret;
-}
-
-static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
- struct ena_tx_buffer *tx_info,
- struct xdp_frame *xdpf,
- struct ena_com_tx_ctx *ena_tx_ctx)
-{
- struct ena_adapter *adapter = xdp_ring->adapter;
- struct ena_com_buf *ena_buf;
- int push_len = 0;
- dma_addr_t dma;
- void *data;
- u32 size;
-
- tx_info->xdpf = xdpf;
- data = tx_info->xdpf->data;
- size = tx_info->xdpf->len;
-
- if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* Designate part of the packet for LLQ */
- push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
-
- ena_tx_ctx->push_header = data;
-
- size -= push_len;
- data += push_len;
- }
-
- ena_tx_ctx->header_len = push_len;
-
- if (size > 0) {
- dma = dma_map_single(xdp_ring->dev,
- data,
- size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
- goto error_report_dma_error;
-
- tx_info->map_linear_data = 0;
-
- ena_buf = tx_info->bufs;
- ena_buf->paddr = dma;
- ena_buf->len = size;
-
- ena_tx_ctx->ena_bufs = ena_buf;
- ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
- }
-
- return 0;
-
-error_report_dma_error:
- ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
- &xdp_ring->syncp);
- netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
-
- return -EINVAL;
-}
-
-static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
- struct net_device *dev,
- struct xdp_frame *xdpf,
- int flags)
-{
- struct ena_com_tx_ctx ena_tx_ctx = {};
- struct ena_tx_buffer *tx_info;
- u16 next_to_use, req_id;
- int rc;
-
- next_to_use = xdp_ring->next_to_use;
- req_id = xdp_ring->free_ids[next_to_use];
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- tx_info->num_of_bufs = 0;
-
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
- if (unlikely(rc))
- return rc;
-
- ena_tx_ctx.req_id = req_id;
-
- rc = ena_xmit_common(dev,
- xdp_ring,
- tx_info,
- &ena_tx_ctx,
- next_to_use,
- xdpf->len);
- if (rc)
- goto error_unmap_dma;
-
- /* trigger the dma engine. ena_ring_tx_doorbell()
- * calls a memory barrier inside it.
- */
- if (flags & XDP_XMIT_FLUSH)
- ena_ring_tx_doorbell(xdp_ring);
-
- return rc;
-
-error_unmap_dma:
- ena_unmap_tx_buff(xdp_ring, tx_info);
- tx_info->xdpf = NULL;
- return rc;
-}
-
-static int ena_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames, u32 flags)
-{
- struct ena_adapter *adapter = netdev_priv(dev);
- struct ena_ring *xdp_ring;
- int qid, i, nxmit = 0;
-
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
-
- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
- return -ENETDOWN;
-
- /* We assume that all rings have the same XDP program */
- if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
- return -ENXIO;
-
- qid = smp_processor_id() % adapter->xdp_num_queues;
- qid += adapter->xdp_first_ring;
- xdp_ring = &adapter->tx_ring[qid];
-
- /* Other CPU ids might try to send thorugh this queue */
- spin_lock(&xdp_ring->xdp_tx_lock);
-
- for (i = 0; i < n; i++) {
- if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
- break;
- nxmit++;
- }
-
- /* Ring doorbell to make device aware of the packets */
- if (flags & XDP_XMIT_FLUSH)
- ena_ring_tx_doorbell(xdp_ring);
-
- spin_unlock(&xdp_ring->xdp_tx_lock);
-
- /* Return number of packets sent */
- return nxmit;
-}
-
-static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
-{
- u32 verdict = ENA_XDP_PASS;
- struct bpf_prog *xdp_prog;
- struct ena_ring *xdp_ring;
- struct xdp_frame *xdpf;
- u64 *xdp_stat;
-
- xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
-
- if (!xdp_prog)
- goto out;
-
- verdict = bpf_prog_run_xdp(xdp_prog, xdp);
-
- switch (verdict) {
- case XDP_TX:
- xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- }
-
- /* Find xmit queue */
- xdp_ring = rx_ring->xdp_ring;
-
- /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
- spin_lock(&xdp_ring->xdp_tx_lock);
-
- if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
- XDP_XMIT_FLUSH))
- xdp_return_frame(xdpf);
-
- spin_unlock(&xdp_ring->xdp_tx_lock);
- xdp_stat = &rx_ring->rx_stats.xdp_tx;
- verdict = ENA_XDP_TX;
- break;
- case XDP_REDIRECT:
- if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
- xdp_stat = &rx_ring->rx_stats.xdp_redirect;
- verdict = ENA_XDP_REDIRECT;
- break;
- }
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_ABORTED:
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_DROP:
- xdp_stat = &rx_ring->rx_stats.xdp_drop;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_PASS:
- xdp_stat = &rx_ring->rx_stats.xdp_pass;
- verdict = ENA_XDP_PASS;
- break;
- default:
- bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_invalid;
- verdict = ENA_XDP_DROP;
- }
-
- ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
-out:
- return verdict;
-}
-
-static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
-{
- adapter->xdp_first_ring = adapter->num_io_queues;
- adapter->xdp_num_queues = adapter->num_io_queues;
-
- ena_init_io_rings(adapter,
- adapter->xdp_first_ring,
- adapter->xdp_num_queues);
-}
-
-static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
-{
- u32 xdp_first_ring = adapter->xdp_first_ring;
- u32 xdp_num_queues = adapter->xdp_num_queues;
- int rc = 0;
-
- rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
- if (rc)
- goto setup_err;
-
- rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
- if (rc)
- goto create_err;
-
- return 0;
-
-create_err:
- ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
-setup_err:
- return rc;
-}
-
-/* Provides a way for both kernel and bpf-prog to know
- * more about the RX-queue a given XDP frame arrived on.
- */
-static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
-{
- int rc;
-
- rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
-
- if (rc) {
- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
- "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
- rx_ring->qid, rc);
- goto err;
- }
-
- rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
- NULL);
-
- if (rc) {
- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
- "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
- rx_ring->qid, rc);
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- }
-
-err:
- return rc;
-}
-
-static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
-{
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-}
-
-static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
- struct bpf_prog *prog,
- int first, int count)
-{
- struct bpf_prog *old_bpf_prog;
- struct ena_ring *rx_ring;
- int i = 0;
-
- for (i = first; i < count; i++) {
- rx_ring = &adapter->rx_ring[i];
- old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
-
- if (!old_bpf_prog && prog) {
- ena_xdp_register_rxq_info(rx_ring);
- rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
- } else if (old_bpf_prog && !prog) {
- ena_xdp_unregister_rxq_info(rx_ring);
- rx_ring->rx_headroom = NET_SKB_PAD;
- }
- }
-}
-
-static void ena_xdp_exchange_program(struct ena_adapter *adapter,
- struct bpf_prog *prog)
-{
- struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
-
- ena_xdp_exchange_program_rx_in_range(adapter,
- prog,
- 0,
- adapter->num_io_queues);
-
- if (old_bpf_prog)
- bpf_prog_put(old_bpf_prog);
-}
-
-static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
-{
- bool was_up;
- int rc;
-
- was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
-
- if (was_up)
- ena_down(adapter);
-
- adapter->xdp_first_ring = 0;
- adapter->xdp_num_queues = 0;
- ena_xdp_exchange_program(adapter, NULL);
- if (was_up) {
- rc = ena_up(adapter);
- if (rc)
- return rc;
- }
- return 0;
-}
-
-static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- struct bpf_prog *prog = bpf->prog;
- struct bpf_prog *old_bpf_prog;
- int rc, prev_mtu;
- bool is_up;
-
- is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
- rc = ena_xdp_allowed(adapter);
- if (rc == ENA_XDP_ALLOWED) {
- old_bpf_prog = adapter->xdp_bpf_prog;
- if (prog) {
- if (!is_up) {
- ena_init_all_xdp_queues(adapter);
- } else if (!old_bpf_prog) {
- ena_down(adapter);
- ena_init_all_xdp_queues(adapter);
- }
- ena_xdp_exchange_program(adapter, prog);
-
- if (is_up && !old_bpf_prog) {
- rc = ena_up(adapter);
- if (rc)
- return rc;
- }
- xdp_features_set_redirect_target(netdev, false);
- } else if (old_bpf_prog) {
- xdp_features_clear_redirect_target(netdev);
- rc = ena_destroy_and_free_all_xdp_queues(adapter);
- if (rc)
- return rc;
- }
-
- prev_mtu = netdev->max_mtu;
- netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
-
- if (!old_bpf_prog)
- netif_info(adapter, drv, adapter->netdev,
- "XDP program is set, changing the max_mtu from %d to %d",
- prev_mtu, netdev->max_mtu);
-
- } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
- netif_err(adapter, drv, adapter->netdev,
- "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
- netdev->mtu, ENA_XDP_MAX_MTU);
- NL_SET_ERR_MSG_MOD(bpf->extack,
- "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
- return -EINVAL;
- } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
- netif_err(adapter, drv, adapter->netdev,
- "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
- adapter->num_io_queues, adapter->max_num_io_queues);
- NL_SET_ERR_MSG_MOD(bpf->extack,
- "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
- * program as well as to query the current xdp program id.
- */
-static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
-{
- switch (bpf->command) {
- case XDP_SETUP_PROG:
- return ena_xdp_set(netdev, bpf);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
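
Everything deleted in the hunk above — the XDP NAPI poll, frame mapping and transmit, the verdict switch, XDP queue setup/teardown and the ndo_bpf handler — moves into the new drivers/net/ethernet/amazon/ena/ena_xdp.c (468 lines in the diffstat), with declarations in ena_xdp.h. The remainder of ena_netdev.c keeps calling a handful of these routines, so declarations along the following lines must exist in ena_xdp.h; the names and return types are taken from the call sites later in this patch and the removed definitions above, while the exact header contents are not shown here:

/* Presumed subset of ena_xdp.h, inferred from remaining call sites. */
int ena_xdp_io_poll(struct napi_struct *napi, int budget);
int ena_xdp_register_rxq_info(struct ena_ring *rx_ring);
void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring);
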
@@ -688,8 +180,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
u64_stats_init(&ring->syncp);
}
-static void ena_init_io_rings(struct ena_adapter *adapter,
- int first_index, int count)
+void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev;
struct ena_ring *txr, *rxr;
@@ -820,9 +312,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->push_buf_intermediate_buf = NULL;
}
-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index,
- int count)
+int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
int i, rc = 0;
@@ -845,8 +336,8 @@ err_setup_tx:
return rc;
}
-static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index, int count)
+void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
int i;
@@ -859,7 +350,7 @@ static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
*
* Free all transmit software resources
*/
-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
+void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
ena_free_all_io_tx_resources_in_range(adapter,
0,
@@ -1169,8 +660,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info)
+void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
{
struct ena_com_buf *ena_buf;
u32 cnt;
@@ -1262,6 +753,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_RXQ_IDX(i);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
+ ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
}
@@ -1272,8 +764,8 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
ena_destroy_all_rx_queues(adapter);
}
-static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
- struct ena_tx_buffer *tx_info, bool is_xdp)
+int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp)
{
if (tx_info)
netif_err(ring->adapter,
@@ -1305,17 +797,6 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
-static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
-{
- struct ena_tx_buffer *tx_info;
-
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- if (likely(tx_info->xdpf))
- return 0;
-
- return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
-}
-
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
struct netdev_queue *txq;
@@ -1363,7 +844,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
skb);
- tx_bytes += skb->len;
+ tx_bytes += tx_info->total_tx_size;
dev_kfree_skb(skb);
tx_pkts++;
total_done += tx_info->tx_descs;
@@ -1688,6 +1169,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u
return ret;
}
+
/* ena_clean_rx_irq - Cleanup RX irq
* @rx_ring: RX ring to clean
* @napi: napi handler
@@ -1880,8 +1362,8 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
rx_ring->per_napi_packets = 0;
}
-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring)
+void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
{
u32 rx_interval = tx_ring->smoothed_interval;
struct ena_eth_io_intr_reg intr_reg;
@@ -1913,8 +1395,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring)
+void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
{
int cpu = get_cpu();
int numa_node;
@@ -1949,67 +1431,6 @@ out:
put_cpu();
}
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
-{
- u32 total_done = 0;
- u16 next_to_clean;
- int tx_pkts = 0;
- u16 req_id;
- int rc;
-
- if (unlikely(!xdp_ring))
- return 0;
- next_to_clean = xdp_ring->next_to_clean;
-
- while (tx_pkts < budget) {
- struct ena_tx_buffer *tx_info;
- struct xdp_frame *xdpf;
-
- rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
- &req_id);
- if (rc) {
- if (unlikely(rc == -EINVAL))
- handle_invalid_req_id(xdp_ring, req_id, NULL,
- true);
- break;
- }
-
- /* validate that the request id points to a valid xdp_frame */
- rc = validate_xdp_req_id(xdp_ring, req_id);
- if (rc)
- break;
-
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- xdpf = tx_info->xdpf;
-
- tx_info->xdpf = NULL;
- tx_info->last_jiffies = 0;
- ena_unmap_tx_buff(xdp_ring, tx_info);
-
- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
- "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
- xdpf);
-
- tx_pkts++;
- total_done += tx_info->tx_descs;
-
- xdp_return_frame(xdpf);
- xdp_ring->free_ids[next_to_clean] = req_id;
- next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
- xdp_ring->ring_size);
- }
-
- xdp_ring->next_to_clean = next_to_clean;
- ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
-
- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
- "tx_poll: q %d done. total pkts: %d\n",
- xdp_ring->qid, tx_pkts);
-
- return tx_pkts;
-}
-
static int ena_io_poll(struct napi_struct *napi, int budget)
{
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
@@ -2326,28 +1747,36 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
netif_napi_del(&adapter->ena_napi[i].napi);
- WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
- adapter->ena_napi[i].xdp_ring);
+ WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].rx_ring);
}
}
static void ena_init_napi_in_range(struct ena_adapter *adapter,
int first_index, int count)
{
+ int (*napi_handler)(struct napi_struct *napi, int budget);
int i;
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
+ struct ena_ring *rx_ring, *tx_ring;
- netif_napi_add(adapter->netdev, &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
+ memset(napi, 0, sizeof(*napi));
- if (!ENA_IS_XDP_INDEX(adapter, i)) {
- napi->rx_ring = &adapter->rx_ring[i];
- napi->tx_ring = &adapter->tx_ring[i];
- } else {
- napi->xdp_ring = &adapter->tx_ring[i];
- }
+ rx_ring = &adapter->rx_ring[i];
+ tx_ring = &adapter->tx_ring[i];
+
+ napi_handler = ena_io_poll;
+ if (ENA_IS_XDP_INDEX(adapter, i))
+ napi_handler = ena_xdp_io_poll;
+
+ netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
+
+ if (!ENA_IS_XDP_INDEX(adapter, i))
+ napi->rx_ring = rx_ring;
+
+ napi->tx_ring = tx_ring;
napi->qid = i;
}
}
@@ -2475,8 +1904,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
return rc;
}
-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
- int first_index, int count)
+int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
@@ -2556,12 +1985,15 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
if (rc)
goto create_err;
INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
+
+ ena_xdp_register_rxq_info(&adapter->rx_ring[i]);
}
return 0;
create_err:
while (i--) {
+ ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
}
@@ -2686,7 +2118,7 @@ err_setup_tx:
}
}
-static int ena_up(struct ena_adapter *adapter)
+int ena_up(struct ena_adapter *adapter)
{
int io_queue_count, rc, i;
@@ -2748,7 +2180,7 @@ err_req_irq:
return rc;
}
-static void ena_down(struct ena_adapter *adapter)
+void ena_down(struct ena_adapter *adapter)
{
int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
@@ -3179,7 +2611,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
- rc = ena_xmit_common(dev,
+ rc = ena_xmit_common(adapter,
tx_ring,
tx_info,
&ena_tx_ctx,
@@ -3275,8 +2707,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
strscpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
- strncpy(host_info->os_dist_str, utsname()->release,
- sizeof(host_info->os_dist_str) - 1);
+ strscpy(host_info->os_dist_str, utsname()->release,
+ sizeof(host_info->os_dist_str));
host_info->driver_version =
(DRV_MODULE_GEN_MAJOR) |
(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
@@ -3363,6 +2795,7 @@ static void ena_get_stats64(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_ring *rx_ring, *tx_ring;
+ u64 total_xdp_rx_drops = 0;
unsigned int start;
u64 rx_drops;
u64 tx_drops;
@@ -3371,8 +2804,8 @@ static void ena_get_stats64(struct net_device *netdev,
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
return;
- for (i = 0; i < adapter->num_io_queues; i++) {
- u64 bytes, packets;
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
+ u64 bytes, packets, xdp_rx_drops;
tx_ring = &adapter->tx_ring[i];
@@ -3385,16 +2818,22 @@ static void ena_get_stats64(struct net_device *netdev,
stats->tx_packets += packets;
stats->tx_bytes += bytes;
+ /* In XDP there isn't an RX queue counterpart */
+ if (ENA_IS_XDP_INDEX(adapter, i))
+ continue;
+
rx_ring = &adapter->rx_ring[i];
do {
start = u64_stats_fetch_begin(&rx_ring->syncp);
packets = rx_ring->rx_stats.cnt;
bytes = rx_ring->rx_stats.bytes;
+ xdp_rx_drops = rx_ring->rx_stats.xdp_drop;
} while (u64_stats_fetch_retry(&rx_ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
+ total_xdp_rx_drops += xdp_rx_drops;
}
do {
@@ -3403,7 +2842,7 @@ static void ena_get_stats64(struct net_device *netdev,
tx_drops = adapter->dev_stats.tx_drops;
} while (u64_stats_fetch_retry(&adapter->syncp, start));
- stats->rx_dropped = rx_drops;
+ stats->rx_dropped = rx_drops + total_xdp_rx_drops;
stats->tx_dropped = tx_drops;
stats->multicast = 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 33c923e1261a..6d2cc20210cc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -110,19 +110,6 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
-/* The max MTU size is configured to be the ethernet frame size without
- * the overhead of the ethernet header, which can have a VLAN header, and
- * a frame check sequence (FCS).
- * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
- */
-
-#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
- VLAN_HLEN - XDP_PACKET_HEADROOM - \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-
-#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
- ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
-
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -138,13 +125,18 @@ struct ena_napi {
struct napi_struct napi;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- struct ena_ring *xdp_ring;
u32 qid;
struct dim dim;
};
struct ena_tx_buffer {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ /* XDP buffer structure which is used for sending packets in
+ * the xdp queues
+ */
+ struct xdp_frame *xdpf;
+ };
/* num of ena desc for this specific skb
* (includes data desc and metadata desc)
*/
@@ -152,16 +144,14 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
- /* XDP buffer structure which is used for sending packets in
- * the xdp queues
- */
- struct xdp_frame *xdpf;
+ /* Total size of all buffers in bytes */
+ u32 total_tx_size;
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
/* Used for detect missing tx packets to limit the number of prints */
- u32 print_once;
+ u8 print_once;
/* Save the last jiffies to detect missing tx packets
*
* sets to non zero value on ena_start_xmit and set to zero on
@@ -421,47 +411,44 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
-enum ena_xdp_errors_t {
- ENA_XDP_ALLOWED = 0,
- ENA_XDP_CURRENT_MTU_TOO_LARGE,
- ENA_XDP_NO_ENOUGH_QUEUES,
-};
+int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp);
-enum ENA_XDP_ACTIONS {
- ENA_XDP_PASS = 0,
- ENA_XDP_TX = BIT(0),
- ENA_XDP_REDIRECT = BIT(1),
- ENA_XDP_DROP = BIT(2)
-};
-
-#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
-
-static inline bool ena_xdp_present(struct ena_adapter *adapter)
-{
- return !!adapter->xdp_bpf_prog;
-}
-
-static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
+static inline void ena_increase_stat(u64 *statp, u64 cnt,
+ struct u64_stats_sync *syncp)
{
- return !!ring->xdp_bpf_prog;
+ u64_stats_update_begin(syncp);
+ (*statp) += cnt;
+ u64_stats_update_end(syncp);
}
-static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
- u32 queues)
+static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
{
- return 2 * queues <= adapter->max_num_io_queues;
-}
-
-static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
-{
- enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
-
- if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
- rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
- else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
- rc = ENA_XDP_NO_ENOUGH_QUEUES;
-
- return rc;
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
}
+int ena_xmit_common(struct ena_adapter *adapter,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes);
+void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info);
+void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count);
+int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
+void ena_down(struct ena_adapter *adapter);
+int ena_up(struct ena_adapter *adapter);
+void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring);
+void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
new file mode 100644
index 000000000000..fc1c4ef73ba3
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include "ena_xdp.h"
+
+static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
+
+ return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
+}
+
+static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_frame *xdpf,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_adapter *adapter = tx_ring->adapter;
+ struct ena_com_buf *ena_buf;
+ int push_len = 0;
+ dma_addr_t dma;
+ void *data;
+ u32 size;
+
+ tx_info->xdpf = xdpf;
+ data = tx_info->xdpf->data;
+ size = tx_info->xdpf->len;
+
+ if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Designate part of the packet for LLQ */
+ push_len = min_t(u32, size, tx_ring->tx_max_header_size);
+
+ ena_tx_ctx->push_header = data;
+
+ size -= push_len;
+ data += push_len;
+ }
+
+ ena_tx_ctx->header_len = push_len;
+
+ if (size > 0) {
+ dma = dma_map_single(tx_ring->dev,
+ data,
+ size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+ goto error_report_dma_error;
+
+ tx_info->map_linear_data = 0;
+
+ ena_buf = tx_info->bufs;
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ ena_tx_ctx->ena_bufs = ena_buf;
+ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+ }
+
+ return 0;
+
+error_report_dma_error:
+ ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+ &tx_ring->syncp);
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
+
+ return -EINVAL;
+}
+
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+ struct ena_adapter *adapter,
+ struct xdp_frame *xdpf,
+ int flags)
+{
+ struct ena_com_tx_ctx ena_tx_ctx = {};
+ struct ena_tx_buffer *tx_info;
+ u16 next_to_use, req_id;
+ int rc;
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
+ if (unlikely(rc))
+ return rc;
+
+ ena_tx_ctx.req_id = req_id;
+
+ rc = ena_xmit_common(adapter,
+ tx_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ xdpf->len);
+ if (rc)
+ goto error_unmap_dma;
+
+ /* trigger the dma engine. ena_ring_tx_doorbell()
+ * calls a memory barrier inside it.
+ */
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(tx_ring);
+
+ return rc;
+
+error_unmap_dma:
+ ena_unmap_tx_buff(tx_ring, tx_info);
+ tx_info->xdpf = NULL;
+ return rc;
+}
+
+int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_ring *tx_ring;
+ int qid, i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return -ENETDOWN;
+
+ /* We assume that all rings have the same XDP program */
+ if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
+ return -ENXIO;
+
+ qid = smp_processor_id() % adapter->xdp_num_queues;
+ qid += adapter->xdp_first_ring;
+ tx_ring = &adapter->tx_ring[qid];
+
+ /* Other CPU ids might try to send through this queue */
+ spin_lock(&tx_ring->xdp_tx_lock);
+
+ for (i = 0; i < n; i++) {
+ if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
+ break;
+ nxmit++;
+ }
+
+ /* Ring doorbell to make device aware of the packets */
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(tx_ring);
+
+ spin_unlock(&tx_ring->xdp_tx_lock);
+
+ /* Return number of packets sent */
+ return nxmit;
+}
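The qid arithmetic in ena_xdp_xmit() above hashes the sending CPU onto one of the dedicated XDP TX rings, which sit after the regular IO rings in the tx_ring array. A minimal worked sketch of that mapping, using illustrative numbers rather than anything this patch mandates:

/* Illustrative only: assume num_io_queues = 8, so xdp_first_ring = 8 and
 * xdp_num_queues = 8 (see ena_init_all_xdp_queues() below).
 */
qid = smp_processor_id() % adapter->xdp_num_queues; /* CPU 10 -> 10 % 8 = 2 */
qid += adapter->xdp_first_ring;                     /* 2 + 8 = ring 10      */
tx_ring = &adapter->tx_ring[qid];                   /* dedicated XDP TX ring */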
+
+static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+{
+ adapter->xdp_first_ring = adapter->num_io_queues;
+ adapter->xdp_num_queues = adapter->num_io_queues;
+
+ ena_init_io_rings(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+}
+
+int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+{
+ u32 xdp_first_ring = adapter->xdp_first_ring;
+ u32 xdp_num_queues = adapter->xdp_num_queues;
+ int rc = 0;
+
+ rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ if (rc)
+ goto setup_err;
+
+ rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ if (rc)
+ goto create_err;
+
+ return 0;
+
+create_err:
+ ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+setup_err:
+ return rc;
+}
+
+/* Provides a way for both kernel and bpf-prog to know
+ * more about the RX-queue a given XDP frame arrived on.
+ */
+int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
+{
+ int rc;
+
+ rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
+
+ netif_dbg(rx_ring->adapter, ifup, rx_ring->netdev, "Registering RX info for queue %d",
+ rx_ring->qid);
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ goto err;
+ }
+
+ rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+err:
+ return rc;
+}
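Registration pairs with the RX queue lifetime handled in ena_netdev.c earlier in this patch: the rxq info is registered right after each IO RX queue is created and unregistered just before it is destroyed. A short pairing sketch, with the create helper name assumed here only for illustration:

/* Pairing sketch (mirrors the ena_netdev.c hunks above); the create helper
 * name is an assumption, not taken from this patch.
 */
rc = ena_create_io_rx_queue(adapter, i);
if (!rc)
        ena_xdp_register_rxq_info(&adapter->rx_ring[i]);
/* ... and on teardown ... */
ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
ena_com_destroy_io_queue(adapter->ena_dev, ENA_IO_RXQ_IDX(i));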
+
+void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
+{
+ netif_dbg(rx_ring->adapter, ifdown, rx_ring->netdev,
+ "Unregistering RX info for queue %d",
+ rx_ring->qid);
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+}
+
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count)
+{
+ struct bpf_prog *old_bpf_prog;
+ struct ena_ring *rx_ring;
+ int i = 0;
+
+ for (i = first; i < count; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
+ if (!old_bpf_prog && prog) {
+ rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+ } else if (old_bpf_prog && !prog) {
+ rx_ring->rx_headroom = NET_SKB_PAD;
+ }
+ }
+}
+
+static void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
+
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ prog,
+ 0,
+ adapter->num_io_queues);
+
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+}
+
+static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
+{
+ bool was_up;
+ int rc;
+
+ was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ if (was_up)
+ ena_down(adapter);
+
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+ ena_xdp_exchange_program(adapter, NULL);
+ if (was_up) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ int rc, prev_mtu;
+ bool is_up;
+
+ is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ rc = ena_xdp_allowed(adapter);
+ if (rc == ENA_XDP_ALLOWED) {
+ old_bpf_prog = adapter->xdp_bpf_prog;
+ if (prog) {
+ if (!is_up) {
+ ena_init_all_xdp_queues(adapter);
+ } else if (!old_bpf_prog) {
+ ena_down(adapter);
+ ena_init_all_xdp_queues(adapter);
+ }
+ ena_xdp_exchange_program(adapter, prog);
+
+ netif_dbg(adapter, drv, adapter->netdev, "Set a new XDP program\n");
+
+ if (is_up && !old_bpf_prog) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ xdp_features_set_redirect_target(netdev, false);
+ } else if (old_bpf_prog) {
+ xdp_features_clear_redirect_target(netdev);
+ netif_dbg(adapter, drv, adapter->netdev, "Removing XDP program\n");
+
+ rc = ena_destroy_and_free_all_xdp_queues(adapter);
+ if (rc)
+ return rc;
+ }
+
+ prev_mtu = netdev->max_mtu;
+ netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
+
+ if (!old_bpf_prog)
+ netif_info(adapter, drv, adapter->netdev,
+ "XDP program is set, changing the max_mtu from %d to %d",
+ prev_mtu, netdev->max_mtu);
+
+ } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
+ netdev->mtu, ENA_XDP_MAX_MTU);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
+ return -EINVAL;
+ } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
+ adapter->num_io_queues, adapter->max_num_io_queues);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
+ * program as well as to query the current xdp program id.
+ */
+int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ena_xdp_set(netdev, bpf);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
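ena_xdp() only takes effect once it is plugged into the driver's net_device_ops. A minimal sketch of that wiring, with the ops struct name and the elided callbacks assumed rather than taken from this patch:

/* Sketch, not the driver's actual ops table */
static const struct net_device_ops ena_netdev_ops_sketch = {
        /* ... regular ndo callbacks elided ... */
        .ndo_bpf      = ena_xdp,      /* XDP_SETUP_PROG attach/detach  */
        .ndo_xdp_xmit = ena_xdp_xmit, /* XDP_REDIRECT transmit target  */
};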
+
+static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
+{
+ u32 total_done = 0;
+ u16 next_to_clean;
+ int tx_pkts = 0;
+ u16 req_id;
+ int rc;
+
+ if (unlikely(!tx_ring))
+ return 0;
+ next_to_clean = tx_ring->next_to_clean;
+
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct xdp_frame *xdpf;
+
+ rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
+ &req_id);
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(tx_ring, req_id, NULL, true);
+ break;
+ }
+
+ /* validate that the request id points to a valid xdp_frame */
+ rc = validate_xdp_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+
+ tx_info->last_jiffies = 0;
+
+ xdpf = tx_info->xdpf;
+ tx_info->xdpf = NULL;
+ ena_unmap_tx_buff(tx_ring, tx_info);
+ xdp_return_frame(xdpf);
+
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+ tx_ring->free_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ tx_ring->ring_size);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id);
+ }
+
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
+ ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ tx_ring->qid, tx_pkts);
+
+ return tx_pkts;
+}
+
+/* This is the XDP napi callback. XDP queues use a napi callback separate
+ * from the one used by Rx/Tx queues.
+ */
+int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ struct ena_ring *tx_ring;
+ u32 work_done;
+ int ret;
+
+ tx_ring = ena_napi->tx_ring;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ work_done = ena_clean_xdp_irq(tx_ring, budget);
+
+ /* If the device is about to reset or is down, avoid unmasking
+ * the interrupt and return 0 so NAPI won't reschedule
+ */
+ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
+ napi_complete_done(napi, 0);
+ ret = 0;
+ } else if (budget > work_done) {
+ ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1,
+ &tx_ring->syncp);
+ if (napi_complete_done(napi, work_done))
+ ena_unmask_interrupt(tx_ring, NULL);
+
+ ena_update_ring_numa_node(tx_ring, NULL);
+ ret = work_done;
+ } else {
+ ret = budget;
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->tx_stats.last_napi_jiffies = jiffies;
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.h b/drivers/net/ethernet/amazon/ena/ena_xdp.h
new file mode 100644
index 000000000000..cfd82728486a
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_XDP_H
+#define ENA_XDP_H
+
+#include "ena_netdev.h"
+#include <linux/bpf_trace.h>
+
+/* The max MTU size is configured to be the ethernet frame size without
+ * the overhead of the ethernet header, which can have a VLAN header, and
+ * a frame check sequence (FCS).
+ * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
+ */
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
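A rough worked example of the macro above, assuming a 4 KiB ENA_PAGE_SIZE and a typical 64-bit build where SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) comes out near 320 bytes (both values are configuration-dependent, so this is not authoritative):

/* 4096 - ETH_HLEN(14) - ETH_FCS_LEN(4) - VLAN_HLEN(4)
 *      - XDP_PACKET_HEADROOM(256) - ~320 (aligned shared info)
 *      ~= 3498 bytes of MTU usable while an XDP program is attached.
 */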
+
+#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
+ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+
+enum ENA_XDP_ACTIONS {
+ ENA_XDP_PASS = 0,
+ ENA_XDP_TX = BIT(0),
+ ENA_XDP_REDIRECT = BIT(1),
+ ENA_XDP_DROP = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
+
+int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count);
+int ena_xdp_io_poll(struct napi_struct *napi, int budget);
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+ struct ena_adapter *adapter,
+ struct xdp_frame *xdpf,
+ int flags);
+int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
+int ena_xdp_register_rxq_info(struct ena_ring *rx_ring);
+void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring);
+
+enum ena_xdp_errors_t {
+ ENA_XDP_ALLOWED = 0,
+ ENA_XDP_CURRENT_MTU_TOO_LARGE,
+ ENA_XDP_NO_ENOUGH_QUEUES,
+};
+
+static inline bool ena_xdp_present(struct ena_adapter *adapter)
+{
+ return !!adapter->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+{
+ return !!ring->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
+{
+ return 2 * queues <= adapter->max_num_io_queues;
+}
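The helper above encodes the requirement that every IO queue must be able to get a matching dedicated XDP TX queue. An illustrative instance of the rule, with the numbers chosen only for the example:

/* Example: a device advertising max_num_io_queues = 32 can accept an XDP
 * program only while num_io_queues <= 16, since attaching XDP doubles the
 * TX queue usage (one extra XDP TX queue per IO queue).
 */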
+
+static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+{
+ enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
+
+ if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
+ rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
+ else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ rc = ENA_XDP_NO_ENOUGH_QUEUES;
+
+ return rc;
+}
+
+static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+{
+ u32 verdict = ENA_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct ena_ring *xdp_ring;
+ struct xdp_frame *xdpf;
+ u64 *xdp_stat;
+
+ xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
+
+ verdict = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ switch (verdict) {
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ }
+
+ /* Find xmit queue */
+ xdp_ring = rx_ring->xdp_ring;
+
+ /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
+ spin_lock(&xdp_ring->xdp_tx_lock);
+
+ if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
+ XDP_XMIT_FLUSH))
+ xdp_return_frame(xdpf);
+
+ spin_unlock(&xdp_ring->xdp_tx_lock);
+ xdp_stat = &rx_ring->rx_stats.xdp_tx;
+ verdict = ENA_XDP_TX;
+ break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+ xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+ verdict = ENA_XDP_REDIRECT;
+ break;
+ }
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_DROP:
+ xdp_stat = &rx_ring->rx_stats.xdp_drop;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_PASS:
+ xdp_stat = &rx_ring->rx_stats.xdp_pass;
+ verdict = ENA_XDP_PASS;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+ verdict = ENA_XDP_DROP;
+ }
+
+ ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
+
+ return verdict;
+}
+#endif /* ENA_XDP_H */
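For orientation, a hedged sketch of how an RX-path caller can consume the verdict bits returned by ena_xdp_execute(); the real handling lives in ena_xdp_handle_buff()/ena_clean_rx_irq() and may differ in detail:

verdict = ena_xdp_execute(rx_ring, &xdp);
if (verdict & ENA_XDP_FORWARDED) {
        /* XDP_TX or XDP_REDIRECT: frame handed off, no skb is built */
} else if (verdict & ENA_XDP_DROP) {
        /* recycle the RX buffer; nothing reaches the stack */
} else {
        /* ENA_XDP_PASS: continue the normal skb receive path */
}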
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index 5beadabc2136..ea773cfa0af6 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -63,6 +63,15 @@ static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
return nq_work;
}
+static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
+{
+ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
+ pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
+ return false;
+
+ return refcount_inc_not_zero(&pdsc->adminq_refcnt);
+}
+
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
union pds_core_adminq_comp *comp;
@@ -75,9 +84,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
int aq_work = 0;
int credits;
- /* Don't process AdminQ when shutting down */
- if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
- dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ /* Don't process AdminQ when it's not up */
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
__func__);
return;
}
@@ -124,6 +133,7 @@ credits:
pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
credits,
PDS_CORE_INTR_CRED_REARM);
+ refcount_dec(&pdsc->adminq_refcnt);
}
void pdsc_work_thread(struct work_struct *work)
@@ -135,18 +145,20 @@ void pdsc_work_thread(struct work_struct *work)
irqreturn_t pdsc_adminq_isr(int irq, void *data)
{
- struct pdsc_qcq *qcq = data;
- struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc *pdsc = data;
+ struct pdsc_qcq *qcq;
- /* Don't process AdminQ when shutting down */
- if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
- dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ /* Don't process AdminQ when it's not up */
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
__func__);
return IRQ_HANDLED;
}
+ qcq = &pdsc->adminqcq;
queue_work(pdsc->wq, &qcq->work);
pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
+ refcount_dec(&pdsc->adminq_refcnt);
return IRQ_HANDLED;
}
@@ -179,10 +191,16 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Check that the FW is running */
if (!pdsc_is_fw_running(pdsc)) {
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
-
- dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
- __func__, fw_status);
+ if (pdsc->info_regs) {
+ u8 fw_status =
+ ioread8(&pdsc->info_regs->fw_status);
+
+ dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ } else {
+ dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
+ __func__);
+ }
ret = -ENXIO;
goto err_out_unlock;
@@ -230,6 +248,12 @@ int pdsc_adminq_post(struct pdsc *pdsc,
int err = 0;
int index;
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
+ __func__, cmd->opcode);
+ return -ENXIO;
+ }
+
wc.qcq = &pdsc->adminqcq;
index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
if (index < 0) {
@@ -248,10 +272,16 @@ int pdsc_adminq_post(struct pdsc *pdsc,
break;
if (!pdsc_is_fw_running(pdsc)) {
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
-
- dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
- __func__, fw_status);
+ if (pdsc->info_regs) {
+ u8 fw_status =
+ ioread8(&pdsc->info_regs->fw_status);
+
+ dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ } else {
+ dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
+ __func__);
+ }
err = -ENXIO;
break;
}
@@ -285,6 +315,8 @@ err_out:
queue_work(pdsc->wq, &pdsc->health_work);
}
+ refcount_dec(&pdsc->adminq_refcnt);
+
return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 11c23a7f3172..fd1a5149c003 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
if (err < 0) {
dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out;
+ kfree(padev);
+ return ERR_PTR(err);
}
err = auxiliary_device_add(aux_dev);
if (err) {
dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out_uninit;
+ auxiliary_device_uninit(aux_dev);
+ return ERR_PTR(err);
}
return padev;
-
-err_out_uninit:
- auxiliary_device_uninit(aux_dev);
-err_out:
- kfree(padev);
- return ERR_PTR(err);
}
int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 0d2091e9eb28..7658a7286767 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -125,7 +125,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
snprintf(name, sizeof(name), "%s-%d-%s",
PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
- index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq);
+ index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
if (index < 0)
return index;
qcq->intx = index;
@@ -404,10 +404,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
int numdescs;
int err;
- if (init)
- err = pdsc_dev_init(pdsc);
- else
- err = pdsc_dev_reinit(pdsc);
+ err = pdsc_dev_init(pdsc);
if (err)
return err;
@@ -450,6 +447,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
pdsc_debugfs_add_viftype(pdsc);
}
+ refcount_set(&pdsc->adminq_refcnt, 1);
clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
return 0;
@@ -464,6 +462,8 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
if (!pdsc->pdev->is_virtfn)
pdsc_devcmd_reset(pdsc);
+ if (pdsc->adminqcq.work.func)
+ cancel_work_sync(&pdsc->adminqcq.work);
pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
pdsc_qcq_free(pdsc, &pdsc->adminqcq);
@@ -476,10 +476,9 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
for (i = 0; i < pdsc->nintrs; i++)
pdsc_intr_free(pdsc, i);
- if (removing) {
- kfree(pdsc->intr_info);
- pdsc->intr_info = NULL;
- }
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ pdsc->nintrs = 0;
}
if (pdsc->kern_dbpage) {
@@ -487,6 +486,7 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
pdsc->kern_dbpage = NULL;
}
+ pci_free_irq_vectors(pdsc->pdev);
set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}
@@ -512,6 +512,24 @@ void pdsc_stop(struct pdsc *pdsc)
PDS_CORE_INTR_MASK_SET);
}
+static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
+{
+ /* The driver initializes the adminq_refcnt to 1 when the adminq is
+ * allocated and ready for use. Other users/requesters will increment
+ * the refcnt while in use. If the refcnt is down to 1 then the adminq
+ * is not in use and the refcnt can be cleared and adminq freed. Before
+ * calling this function the driver will set PDSC_S_FW_DEAD, which
+ * causes subsequent attempts to use the adminq and increment the
+ * refcnt to fail. This guarantees that this function will eventually
+ * exit.
+ */
+ while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
+ dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
+ __func__);
+ cpu_relax();
+ }
+}
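The helper above, together with pdsc_adminq_inc_if_up() earlier in this patch, gives adminq users a simple acquire/release discipline. A compact sketch of the pattern as used by pdsc_adminq_isr() and pdsc_adminq_post():

if (!pdsc_adminq_inc_if_up(pdsc))   /* acquire; fails once STOPPING/FW_DEAD */
        return -ENXIO;
/* ... post to or process the adminq ... */
refcount_dec(&pdsc->adminq_refcnt); /* release */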
+
void pdsc_fw_down(struct pdsc *pdsc)
{
union pds_core_notifyq_comp reset_event = {
@@ -527,6 +545,8 @@ void pdsc_fw_down(struct pdsc *pdsc)
if (pdsc->pdev->is_virtfn)
return;
+ pdsc_adminq_wait_and_dec_once_unused(pdsc);
+
/* Notify clients of fw_down */
if (pdsc->fw_reporter)
devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
@@ -577,7 +597,13 @@ err_out:
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+ u8 fw_status;
+
+ /* some sort of teardown already in progress */
+ if (!pdsc->info_regs)
+ return;
+
+ fw_status = ioread8(&pdsc->info_regs->fw_status);
/* is PCI broken? */
if (fw_status != PDS_RC_BAD_PCI)
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index e35d3e7006bf..110c4b826b22 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -184,6 +184,7 @@ struct pdsc {
struct mutex devcmd_lock; /* lock for dev_cmd operations */
struct mutex config_lock; /* lock for configuration operations */
spinlock_t adminq_lock; /* lock for adminq operations */
+ refcount_t adminq_refcnt;
struct pds_core_dev_info_regs __iomem *info_regs;
struct pds_core_dev_cmd_regs __iomem *cmd_regs;
struct pds_core_intr __iomem *intr_ctrl;
@@ -280,7 +281,6 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
union pds_core_dev_comp *comp, int max_seconds);
int pdsc_devcmd_init(struct pdsc *pdsc);
int pdsc_devcmd_reset(struct pdsc *pdsc);
-int pdsc_dev_reinit(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
void pdsc_reset_prepare(struct pci_dev *pdev);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 8ec392299b7d..4e8579ca1c8c 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -64,6 +64,10 @@ DEFINE_SHOW_ATTRIBUTE(identity);
void pdsc_debugfs_add_ident(struct pdsc *pdsc)
{
+ /* This file will already exist in the reset flow */
+ if (debugfs_lookup("identity", pdsc->dentry))
+ return;
+
debugfs_create_file("identity", 0400, pdsc->dentry,
pdsc, &identity_fops);
}
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index 31940b857e0e..e65a1632df50 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -57,6 +57,9 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
bool pdsc_is_fw_running(struct pdsc *pdsc)
{
+ if (!pdsc->info_regs)
+ return false;
+
pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status);
pdsc->last_fw_time = jiffies;
pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat);
@@ -182,13 +185,17 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
{
int err;
+ if (!pdsc->cmd_regs)
+ return -ENXIO;
+
memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
pdsc_devcmd_dbell(pdsc);
err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
- memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
queue_work(pdsc->wq, &pdsc->health_work);
+ else
+ memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
return err;
}
@@ -309,13 +316,6 @@ static int pdsc_identify(struct pdsc *pdsc)
return 0;
}
-int pdsc_dev_reinit(struct pdsc *pdsc)
-{
- pdsc_init_devinfo(pdsc);
-
- return pdsc_identify(pdsc);
-}
-
int pdsc_dev_init(struct pdsc *pdsc)
{
unsigned int nintrs;
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index e9948ea5bbcd..54864f27c87a 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -111,7 +111,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
mutex_lock(&pdsc->devcmd_lock);
err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2);
- memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ if (!err)
+ memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
if (err && err != -EIO)
return err;
diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c
index 90a811f3878a..fa626719e68d 100644
--- a/drivers/net/ethernet/amd/pds_core/fw.c
+++ b/drivers/net/ethernet/amd/pds_core/fw.c
@@ -107,6 +107,9 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
dev_info(pdsc->dev, "Installing firmware\n");
+ if (!pdsc->cmd_regs)
+ return -ENXIO;
+
dl = priv_to_devlink(pdsc);
devlink_flash_update_status_notify(dl, "Preparing to flash",
NULL, 0, 0);
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 3080898d7b95..0050c5894563 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -37,6 +37,11 @@ static void pdsc_unmap_bars(struct pdsc *pdsc)
struct pdsc_dev_bar *bars = pdsc->bars;
unsigned int i;
+ pdsc->info_regs = NULL;
+ pdsc->cmd_regs = NULL;
+ pdsc->intr_status = NULL;
+ pdsc->intr_ctrl = NULL;
+
for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
if (bars[i].vaddr)
pci_iounmap(pdsc->pdev, bars[i].vaddr);
@@ -293,7 +298,7 @@ err_out_stop:
err_out_teardown:
pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
err_out_unmap_bars:
- del_timer_sync(&pdsc->wdtimer);
+ timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
destroy_workqueue(pdsc->wq);
mutex_destroy(&pdsc->config_lock);
@@ -420,7 +425,7 @@ static void pdsc_remove(struct pci_dev *pdev)
*/
pdsc_sriov_configure(pdev, 0);
- del_timer_sync(&pdsc->wdtimer);
+ timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
destroy_workqueue(pdsc->wq);
@@ -433,7 +438,6 @@ static void pdsc_remove(struct pci_dev *pdev)
mutex_destroy(&pdsc->config_lock);
mutex_destroy(&pdsc->devcmd_lock);
- pci_free_irq_vectors(pdev);
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
}
@@ -445,13 +449,32 @@ static void pdsc_remove(struct pci_dev *pdev)
devlink_free(dl);
}
+static void pdsc_stop_health_thread(struct pdsc *pdsc)
+{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ timer_shutdown_sync(&pdsc->wdtimer);
+ if (pdsc->health_work.func)
+ cancel_work_sync(&pdsc->health_work);
+}
+
+static void pdsc_restart_health_thread(struct pdsc *pdsc)
+{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
+ mod_timer(&pdsc->wdtimer, jiffies + 1);
+}
+
void pdsc_reset_prepare(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
+ pdsc_stop_health_thread(pdsc);
pdsc_fw_down(pdsc);
- pci_free_irq_vectors(pdev);
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -486,6 +509,7 @@ void pdsc_reset_done(struct pci_dev *pdev)
}
pdsc_fw_up(pdsc);
+ pdsc_restart_health_thread(pdsc);
}
static const struct pci_error_handlers pdsc_err_handler = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 32fab5e77246..58e7e88aae5b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -527,47 +527,48 @@ static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
return ARRAY_SIZE(pdata->rss_table);
}
-static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int xgbe_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int i;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
- indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
- MAC_RSSDR, DMCH);
+ rxfh->indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+ MAC_RSSDR, DMCH);
}
- if (key)
- memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+ if (rxfh->key)
+ memcpy(rxfh->key, pdata->rss_key, sizeof(pdata->rss_key));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int xgbe_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int ret;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
netdev_err(netdev, "unsupported hash function\n");
return -EOPNOTSUPP;
}
- if (indir) {
- ret = hw_if->set_rss_lookup_table(pdata, indir);
+ if (rxfh->indir) {
+ ret = hw_if->set_rss_lookup_table(pdata, rxfh->indir);
if (ret)
return ret;
}
- if (key) {
- ret = hw_if->set_rss_hash_key(pdata, key);
+ if (rxfh->key) {
+ ret = hw_if->set_rss_hash_key(pdata, rxfh->key);
if (ret)
return ret;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ad136ed493ed..f01a1e566da6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -495,7 +495,7 @@ struct xgbe_ring {
* a DMA channel.
*/
struct xgbe_channel {
- char name[16];
+ char name[20];
/* Address of private data area for device */
struct xgbe_prv_data *pdata;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index ac4ea93bd8dd..18a6c8d99fa0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -447,8 +447,8 @@ static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
return sizeof(cfg->aq_rss.hash_secret_key);
}
-static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int aq_ethtool_get_rss(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@@ -456,21 +456,21 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
cfg = aq_nic_get_cfg(aq_nic);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
- if (indir) {
+ rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ if (rxfh->indir) {
for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++)
- indir[i] = cfg->aq_rss.indirection_table[i];
+ rxfh->indir[i] = cfg->aq_rss.indirection_table[i];
}
- if (key)
- memcpy(key, cfg->aq_rss.hash_secret_key,
+ if (rxfh->key)
+ memcpy(rxfh->key, cfg->aq_rss.hash_secret_key,
sizeof(cfg->aq_rss.hash_secret_key));
return 0;
}
-static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int aq_ethtool_set_rss(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(netdev);
struct aq_nic_cfg_s *cfg;
@@ -482,16 +482,17 @@ static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
rss_entries = cfg->aq_rss.indirection_table_size;
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
/* Fill out the redirection table */
- if (indir)
+ if (rxfh->indir)
for (i = 0; i < rss_entries; i++)
- cfg->aq_rss.indirection_table[i] = indir[i];
+ cfg->aq_rss.indirection_table[i] = rxfh->indir[i];
/* Fill out the rss hash key */
- if (key) {
- memcpy(cfg->aq_rss.hash_secret_key, key,
+ if (rxfh->key) {
+ memcpy(cfg->aq_rss.hash_secret_key, rxfh->key,
sizeof(cfg->aq_rss.hash_secret_key));
err = aq_nic->aq_hw_ops->hw_rss_hash_set(aq_nic->aq_hw,
&cfg->aq_rss);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 28c9b6f1a54f..5acb3e16b567 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -953,8 +953,6 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
unsigned int tx_ring_idx, rx_ring_idx;
- struct aq_ring_s *hwts;
- struct aq_ring_s *ring;
int err;
if (!aq_ptp)
@@ -962,29 +960,23 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
- ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
- tx_ring_idx, &aq_nic->aq_nic_cfg);
- if (!ring) {
- err = -ENOMEM;
+ err = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
+ tx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (err)
goto err_exit;
- }
rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
- ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
- rx_ring_idx, &aq_nic->aq_nic_cfg);
- if (!ring) {
- err = -ENOMEM;
+ err = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
+ rx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (err)
goto err_exit_ptp_tx;
- }
- hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
- aq_nic->aq_nic_cfg.rxds,
- aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
- if (!hwts) {
- err = -ENOMEM;
+ err = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
+ aq_nic->aq_nic_cfg.rxds,
+ aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
+ if (err)
goto err_exit_ptp_rx;
- }
err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
if (err != 0) {
@@ -1001,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
return 0;
err_exit_hwts_rx:
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
@@ -1019,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
aq_ring_free(&aq_ptp->ptp_tx);
aq_ring_free(&aq_ptp->ptp_rx);
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index e1885c1eb100..f7433abd6591 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -132,8 +132,8 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
return 0;
}
-static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic)
+static int aq_ring_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic)
{
int err = 0;
@@ -156,46 +156,29 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
err_exit:
if (err < 0) {
aq_ring_free(self);
- self = NULL;
}
- return self;
+ return err;
}
-struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic,
- unsigned int idx,
- struct aq_nic_cfg_s *aq_nic_cfg)
+int aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
{
- int err = 0;
-
self->aq_nic = aq_nic;
self->idx = idx;
self->size = aq_nic_cfg->txds;
self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
- self = aq_ring_alloc(self, aq_nic);
- if (!self) {
- err = -ENOMEM;
- goto err_exit;
- }
-
-err_exit:
- if (err < 0) {
- aq_ring_free(self);
- self = NULL;
- }
-
- return self;
+ return aq_ring_alloc(self, aq_nic);
}
-struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic,
- unsigned int idx,
- struct aq_nic_cfg_s *aq_nic_cfg)
+int aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
{
- int err = 0;
-
self->aq_nic = aq_nic;
self->idx = idx;
self->size = aq_nic_cfg->rxds;
@@ -217,22 +200,10 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->tail_size = 0;
}
- self = aq_ring_alloc(self, aq_nic);
- if (!self) {
- err = -ENOMEM;
- goto err_exit;
- }
-
-err_exit:
- if (err < 0) {
- aq_ring_free(self);
- self = NULL;
- }
-
- return self;
+ return aq_ring_alloc(self, aq_nic);
}
-struct aq_ring_s *
+int
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
unsigned int idx, unsigned int size, unsigned int dx_size)
{
@@ -250,10 +221,10 @@ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
GFP_KERNEL);
if (!self->dx_ring) {
aq_ring_free(self);
- return NULL;
+ return -ENOMEM;
}
- return self;
+ return 0;
}
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
@@ -948,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self)
}
}
+void aq_ring_hwts_rx_free(struct aq_ring_s *self)
+{
+ if (!self)
+ return;
+
+ if (self->dx_ring) {
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size + AQ_CFG_RXDS_DEF,
+ self->dx_ring, self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
+}
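The new free routine pairs with the int-returning allocators; the PTP path now checks an error code instead of a returned pointer. A usage sketch mirroring the aq_ptp.c hunks earlier in this patch:

err = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
                            aq_nic->aq_nic_cfg.rxds,
                            aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
if (err)
        goto err_exit_ptp_rx;
/* ... */
aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);   /* teardown and error unwind */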
+
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
unsigned int count;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 0a6c34438c1d..d627ace850ff 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -183,14 +183,14 @@ static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
self->sw_head - self->sw_tail - 1);
}
-struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic,
- unsigned int idx,
- struct aq_nic_cfg_s *aq_nic_cfg);
-struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic,
- unsigned int idx,
- struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
@@ -207,9 +207,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget);
int aq_ring_rx_fill(struct aq_ring_s *self);
-struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic, unsigned int idx,
- unsigned int size, unsigned int dx_size);
+int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic, unsigned int idx,
+ unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_free(struct aq_ring_s *self);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f5db1c44e9b9..9769ab4f9bef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -136,35 +136,32 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
i, idx);
- ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
- idx_ring, aq_nic_cfg);
- if (!ring) {
- err = -ENOMEM;
+ ring = &self->ring[i][AQ_VEC_TX_ID];
+ err = aq_ring_tx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
+ if (err)
goto err_exit;
- }
++self->tx_rings;
aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
- if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ ring = &self->ring[i][AQ_VEC_RX_ID];
+ if (xdp_rxq_info_reg(&ring->xdp_rxq,
aq_nic->ndev, idx,
self->napi.napi_id) < 0) {
err = -ENOMEM;
goto err_exit;
}
- if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ if (xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL) < 0) {
- xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
err = -ENOMEM;
goto err_exit;
}
- ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
- idx_ring, aq_nic_cfg);
- if (!ring) {
- xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
- err = -ENOMEM;
+ err = aq_ring_rx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
+ if (err) {
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
goto err_exit;
}
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e551ffaed20d..11e8996b33d7 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -284,7 +284,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
ax88796c_proc_tx_hdr(&info, skb->ip_summed);
/* SOP and SEG header */
- memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD);
+ memcpy(skb_push(skb, TX_OVERHEAD), &info.tx_overhead, TX_OVERHEAD);
/* Write SPI TXQ header */
memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);
diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h
index 4a83c991dcbe..68a09edecab8 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.h
+++ b/drivers/net/ethernet/asix/ax88796c_main.h
@@ -25,7 +25,7 @@
#define AX88796C_PHY_REGDUMP_LEN 14
#define AX88796C_PHY_ID 0x10
-#define TX_OVERHEAD 8
+#define TX_OVERHEAD sizeof_field(struct tx_pkt_info, tx_overhead)
#define TX_EOP_SIZE 4
#define AX_MCAST_FILTER_SIZE 8
@@ -549,8 +549,10 @@ struct tx_eop_header {
};
struct tx_pkt_info {
- struct tx_sop_header sop;
- struct tx_segment_header seg;
+ struct_group(tx_overhead,
+ struct tx_sop_header sop;
+ struct tx_segment_header seg;
+ );
struct tx_eop_header eop;
u16 pkt_len;
u16 seq_num;
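struct_group() (from <linux/stddef.h>) wraps the SOP and SEG headers in an anonymous union so they remain addressable as info.sop / info.seg while also being copyable as the single named member info.tx_overhead; that is what lets the memcpy() in ax88796c_main.c take its length from sizeof_field() and keeps fortify-source bounds checking happy. A short sketch of the resulting use, taken from the hunk above:

/* Members stay individually addressable (info.sop, info.seg), and the whole
 * group is copied with an exact, compiler-checked length:
 */
memcpy(skb_push(skb, TX_OVERHEAD), &info.tx_overhead, TX_OVERHEAD);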
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 29b04a274d07..80245c65cc90 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -535,9 +535,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
- if (j == *rule_cnt)
- return -EMSGSIZE;
-
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -547,6 +544,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
priv->net_filters[i - 1].wake_filter)
continue;
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
rule_locs[j++] = priv->net_filters[i].fs.location;
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 53e542881255..6ad1366270f7 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -684,6 +684,8 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
+ if (!buffer_pg)
+ return -ENOMEM;
dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
DMA_FROM_DEVICE);
@@ -1048,6 +1050,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
netdev_err(dev, "could not attach to PHY\n");
goto err_phy_disable;
}
+
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
} else if (!intf->wolopts) {
ret = phy_resume(dev->phydev);
if (ret)
@@ -1092,6 +1097,7 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
err_reclaim_tx:
+ netif_napi_del(&intf->tx_napi);
bcmasp_reclaim_free_all_tx(intf);
err_phy_disconnect:
if (phydev)
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 3e7c8671cd11..72df1bb10172 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
};
module_platform_driver(bcm4908_enet_driver);
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 9b83d5361699..50b8e97a811d 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6e4f36aaf5db..36f9bad28e6a 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -362,4 +362,5 @@ module_init(bgmac_init)
module_exit(bgmac_exit)
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 0b21fd5bd457..77425c7a32db 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
};
module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 448a1b90de5e..6ffdc4229407 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bda3ccc28eca..81d232e6d05f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3486,16 +3486,15 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int bnx2x_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
/* Get the current configuration of the RSS indirection table */
@@ -3511,13 +3510,14 @@ static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
* queue.
*/
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
- indir[i] = ind_table[i] - bp->fp->cl_id;
+ rxfh->indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
-static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int bnx2x_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
@@ -3525,11 +3525,12 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
@@ -3542,7 +3543,7 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
* align the received table to the Client ID of the leading RSS
* queue
*/
- bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = rxfh->indir[i] + bp->fp->cl_id;
}
if (bp->state == BNX2X_STATE_OPEN)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e1f1e646cf48..39845d556baf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -120,6 +120,10 @@ static const struct {
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
+ [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
+ [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
+ [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
+ [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
@@ -174,6 +178,10 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+ { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
+ { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
+ { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
+ { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
@@ -254,22 +262,28 @@ static bool bnxt_vf_pciid(enum board_idx idx)
writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
- writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
+ writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_P5(db, idx) \
- bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
(db)->doorbell)
+#define BNXT_DB_NQ_P7(db, idx) \
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
+
#define BNXT_DB_CQ_ARM(db, idx) \
- writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
+ writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_ARM_P5(db, idx) \
- bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
- (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ BNXT_DB_NQ_P7(db, idx);
+ else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_P5(db, idx);
else
BNXT_DB_CQ(db, idx);
@@ -277,7 +291,7 @@ static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_ARM_P5(db, idx);
else
BNXT_DB_CQ_ARM(db, idx);
@@ -285,9 +299,9 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
- RING_CMP(idx), db->doorbell);
+ DB_RING_IDX(db, idx), db->doorbell);
else
BNXT_DB_CQ(db, idx);
}
@@ -321,7 +335,7 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
else
set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
@@ -331,16 +345,16 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
}
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- int idx)
+ u16 curr)
{
struct bnxt_napi *bnapi = txr->bnapi;
if (bnapi->tx_fault)
return;
- netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
- txr->txq_index, bnapi->tx_pkts,
- txr->tx_cons, txr->tx_prod, idx);
+ netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
+ txr->txq_index, txr->tx_hw_cons,
+ txr->tx_cons, txr->tx_prod, curr);
WARN_ON_ONCE(1);
bnapi->tx_fault = 1;
bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
@@ -381,6 +395,8 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 prod)
{
+ /* Sync BD data before updating doorbell */
+ wmb();
bnxt_db_write(bp, &txr->tx_db, prod);
txr->kick_pending = 0;
}
@@ -388,7 +404,7 @@ static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- struct tx_bd *txbd;
+ struct tx_bd *txbd, *txbd0;
struct tx_bd_ext *txbd1;
struct netdev_queue *txq;
int i;
@@ -430,11 +446,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- txbd->tx_bd_opaque = prod;
-
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->skb = skb;
tx_buf->nr_frags = last_frag;
@@ -519,12 +533,15 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
txbd->tx_bd_haddr = txr->data_mapping;
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
prod = NEXT_TX(prod);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
memcpy(txbd, tx_push1, sizeof(*txbd));
prod = NEXT_TX(prod);
tx_push->doorbell =
- cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+ cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
+ DB_RING_IDX(&txr->tx_db, prod));
WRITE_ONCE(txr->tx_prod, prod);
tx_buf->is_push = 1;
@@ -562,19 +579,29 @@ normal_tx:
((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
prod = NEXT_TX(prod);
txbd1 = (struct tx_bd_ext *)
- &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
txbd1->tx_bd_hsize_lflags = lflags;
if (skb_is_gso(skb)) {
+ bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
u32 hdr_len;
- if (skb->encapsulation)
- hdr_len = skb_inner_tcp_all_headers(skb);
- else
+ if (skb->encapsulation) {
+ if (udp_gso)
+ hdr_len = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ hdr_len = skb_inner_tcp_all_headers(skb);
+ } else if (udp_gso) {
+ hdr_len = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ } else {
hdr_len = skb_tcp_all_headers(skb);
+ }
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -601,11 +628,12 @@ normal_tx:
txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
txbd1->tx_bd_cfa_action =
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
+ txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
@@ -614,7 +642,7 @@ normal_tx:
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_dma_error;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -632,22 +660,26 @@ normal_tx:
skb_tx_timestamp(skb);
- /* Sync BD data before updating doorbell */
- wmb();
-
prod = NEXT_TX(prod);
WRITE_ONCE(txr->tx_prod, prod);
- if (!netdev_xmit_more() || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
bnxt_txr_db_kick(bp, txr, prod);
- else
+ } else {
+ if (free_size >= bp->tx_wake_thresh)
+ txbd0->tx_bd_len_flags_type |=
+ cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
txr->kick_pending = 1;
+ }
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
- if (netdev_xmit_more() && !tx_buf->is_push)
+ if (netdev_xmit_more() && !tx_buf->is_push) {
+ txbd0->tx_bd_len_flags_type &=
+ cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
bnxt_txr_db_kick(bp, txr, prod);
+ }
netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
bp->tx_wake_thresh);
@@ -662,7 +694,7 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), DMA_TO_DEVICE);
prod = NEXT_TX(prod);
@@ -670,7 +702,7 @@ tx_dma_error:
/* unmap remaining mapped pages */
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_TO_DEVICE);
@@ -686,31 +718,32 @@ tx_kick_pending:
return NETDEV_TX_OK;
}
-static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ int budget)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
- u16 cons = txr->tx_cons;
struct pci_dev *pdev = bp->pdev;
- int nr_pkts = bnapi->tx_pkts;
- int i;
+ u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
+ u16 cons = txr->tx_cons;
+ int tx_pkts = 0;
- for (i = 0; i < nr_pkts; i++) {
+ while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
int j, last;
- tx_buf = &txr->tx_buf_ring[cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
cons = NEXT_TX(cons);
skb = tx_buf->skb;
tx_buf->skb = NULL;
if (unlikely(!skb)) {
- bnxt_sched_reset_txr(bp, txr, i);
+ bnxt_sched_reset_txr(bp, txr, cons);
return;
}
+ tx_pkts++;
tx_bytes += skb->len;
if (tx_buf->is_push) {
@@ -724,7 +757,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
for (j = 0; j < last; j++) {
cons = NEXT_TX(cons);
- tx_buf = &txr->tx_buf_ring[cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
dma_unmap_page(
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
@@ -732,7 +765,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
DMA_TO_DEVICE);
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
skb = NULL;
@@ -747,14 +780,25 @@ next_tx_int:
dev_consume_skb_any(skb);
}
- bnapi->tx_pkts = 0;
WRITE_ONCE(txr->tx_cons, cons);
- __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
+ __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_tx_ring_info *txr;
+ int i;
+
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
+ __bnxt_tx_int(bp, txr, budget);
+ }
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
+}
+
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
unsigned int *offset,
@@ -803,8 +847,8 @@ static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp)
{
- struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
dma_addr_t mapping;
if (BNXT_RX_PAGE_MODE(bp)) {
@@ -837,9 +881,10 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnxt *bp = rxr->bnapi->bp;
struct rx_bd *cons_bd, *prod_bd;
- prod_rx_buf = &rxr->rx_buf_ring[prod];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data;
@@ -847,8 +892,8 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
prod_rx_buf->mapping = cons_rx_buf->mapping;
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
@@ -868,7 +913,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
- &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
struct page *page;
dma_addr_t mapping;
@@ -885,7 +930,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
__set_bit(sw_prod, rxr->rx_agg_bmap);
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
- rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
rx_agg_buf->page = page;
rx_agg_buf->offset = offset;
@@ -927,7 +972,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
bool p5_tpa = false;
u32 i;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
@@ -961,13 +1006,13 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
prod_rx_buf->mapping = cons_rx_buf->mapping;
- prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
prod_bd->rx_bd_opaque = sw_prod;
prod = NEXT_RX_AGG(prod);
- sw_prod = NEXT_RX_AGG(sw_prod);
+ sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
@@ -1094,7 +1139,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
u32 i, total_frag_len = 0;
bool p5_tpa = false;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
@@ -1249,7 +1294,7 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
agg_bufs = TPA_END_AGG_BUFS(tpa_end);
@@ -1290,8 +1335,39 @@ static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
return map->agg_id_tbl[agg_id];
}
+static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->cfa_code_valid = 1;
+ tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
+ tpa_info->vlan_valid = 0;
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ tpa_info->vlan_valid = 1;
+ tpa_info->metadata =
+ le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ }
+}
+
+static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->vlan_valid = 0;
+ if (TPA_START_VLAN_VALID(tpa_start)) {
+ u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
+ u32 vlan_proto = ETH_P_8021Q;
+
+ tpa_info->vlan_valid = 1;
+ if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
+ vlan_proto = ETH_P_8021AD;
+ tpa_info->metadata = vlan_proto << 16 |
+ TPA_START_METADATA0_TCI(tpa_start1);
+ }
+}
+
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
- struct rx_tpa_start_cmp *tpa_start,
+ u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -1300,7 +1376,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_bd *prod_bd;
dma_addr_t mapping;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_START_AGG_ID_P5(tpa_start);
agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
} else {
@@ -1309,7 +1385,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
- prod_rx_buf = &rxr->rx_buf_ring[prod];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons ||
@@ -1320,17 +1396,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
bnxt_sched_reset_rxr(bp, rxr);
return;
}
- /* Store cfa_code in tpa_info to use in tpa_end
- * completion processing.
- */
- tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
prod_rx_buf->data = tpa_info->data;
prod_rx_buf->data_ptr = tpa_info->data_ptr;
mapping = tpa_info->mapping;
prod_rx_buf->mapping = mapping;
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1343,12 +1415,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
RX_TPA_START_CMP_LEN_SHIFT;
if (likely(TPA_START_HASH_VALID(tpa_start))) {
- u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
-
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
+ if (TPA_START_IS_IPV6(tpa_start1))
+ tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
+ else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+ TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -1358,13 +1431,16 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
- tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
+ bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
+ else
+ bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod);
- cons = NEXT_RX(cons);
- rxr->rx_next_cons = NEXT_RX(cons);
+ cons = RING_RX(bp, NEXT_RX(cons));
+ rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
cons_rx_buf = &rxr->rx_buf_ring[cons];
bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@@ -1563,7 +1639,7 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
else
payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
@@ -1594,6 +1670,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct net_device *dev = bp->dev;
u8 *data_ptr, agg_bufs;
unsigned int len;
struct bnxt_tpa_info *tpa_info;
@@ -1611,7 +1688,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_END_AGG_ID_P5(tpa_end);
agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
@@ -1700,14 +1777,15 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
}
- skb->protocol =
- eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
+ if (tpa_info->cfa_code_valid)
+ dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
+ skb->protocol = eth_type_trans(skb, dev);
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
- (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
+ if (tpa_info->vlan_valid &&
+ (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
__be16 vlan_proto = htons(tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT);
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
@@ -1774,6 +1852,64 @@ ts_valid:
return true;
}
+static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
+ struct rx_cmp *rxcmp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ __be16 vlan_proto;
+ u16 vtag;
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ __le32 flags2 = rxcmp1->rx_cmp_flags2;
+ u32 meta_data;
+
+ if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
+ return skb;
+
+ meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+ vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (eth_type_vlan(vlan_proto))
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ else
+ goto vlan_err;
+ } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ if (RX_CMP_VLAN_VALID(rxcmp)) {
+ u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
+
+ if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
+ vlan_proto = htons(ETH_P_8021Q);
+ else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
+ vlan_proto = htons(ETH_P_8021AD);
+ else
+ goto vlan_err;
+ vtag = RX_CMP_METADATA0_TCI(rxcmp1);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ }
+ }
+ return skb;
+vlan_err:
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
+ struct rx_cmp *rxcmp)
+{
+ u8 ext_op;
+
+ ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
+ switch (ext_op) {
+ case EXT_OP_INNER_4:
+ case EXT_OP_OUTER_4:
+ case EXT_OP_INNFL_3:
+ case EXT_OP_OUTFL_3:
+ return PKT_HASH_TYPE_L4;
+ default:
+ return PKT_HASH_TYPE_L3;
+ }
+}
+
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
@@ -1790,7 +1926,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp *rxcmp;
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
- u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -1827,8 +1963,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
dma_rmb();
prod = rxr->rx_prod;
- if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
- bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
+ bnxt_tpa_start(bp, rxr, cmp_type,
+ (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
*event |= BNXT_RX_EVENT;
@@ -1893,7 +2031,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
rx_err);
@@ -1981,32 +2119,32 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (RX_CMP_HASH_VALID(rxcmp)) {
- u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
- enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+ enum pkt_hash_types type;
- /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type != 1 && hash_type != 3)
- type = PKT_HASH_TYPE_L3;
+ if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ type = bnxt_rss_ext_op(bp, rxcmp);
+ } else {
+ u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+
+ /* RSS profiles 1 and 3 with extract code 0 for inner
+ * 4-tuple
+ */
+ if (hash_type != 1 && hash_type != 3)
+ type = PKT_HASH_TYPE_L3;
+ else
+ type = PKT_HASH_TYPE_L4;
+ }
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
- cfa_code = RX_CMP_CFA_CODE(rxcmp1);
- skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
+ if (cmp_type == CMP_TYPE_RX_L2_CMP)
+ dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
+ skb->protocol = eth_type_trans(skb, dev);
- if ((rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
- (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
- u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
- u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- __be16 vlan_proto = htons(meta_data >>
- RX_CMP_FLAGS2_METADATA_TPID_SFT);
-
- if (eth_type_vlan(vlan_proto)) {
- __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
- } else {
- dev_kfree_skb(skb);
+ if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
+ skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
+ if (!skb)
goto next_rx;
- }
}
skb_checksum_none_assert(skb);
@@ -2023,7 +2161,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u64 ns, ts;
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
@@ -2047,7 +2185,7 @@ next_rx:
next_rx_no_len:
rxr->rx_prod = NEXT_RX(prod);
- rxr->rx_next_cons = NEXT_RX(cons);
+ rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
@@ -2086,7 +2224,8 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
*/
dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
- if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ if (cmp_type == CMP_TYPE_RX_L2_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
@@ -2146,6 +2285,10 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ return link_info->force_link_speed2;
if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
return link_info->force_pam4_link_speed;
return link_info->force_link_speed;
@@ -2153,6 +2296,28 @@ static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ link_info->req_link_speed = link_info->force_link_speed2;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ switch (link_info->req_link_speed) {
+ case BNXT_LINK_SPEED_50GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ break;
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
+ break;
+ default:
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ }
+ return;
+ }
link_info->req_link_speed = link_info->force_link_speed;
link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
if (link_info->force_pam4_link_speed) {
@@ -2163,12 +2328,25 @@ static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ link_info->advertising = link_info->auto_link_speeds2;
+ return;
+ }
link_info->advertising = link_info->auto_link_speeds;
link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
}
static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (link_info->req_link_speed != link_info->force_link_speed2)
+ return true;
+ return false;
+ }
if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
link_info->req_link_speed != link_info->force_link_speed)
return true;
@@ -2180,6 +2358,13 @@ static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (link_info->advertising != link_info->auto_link_speeds2)
+ return true;
+ return false;
+ }
if (link_info->advertising != link_info->auto_link_speeds ||
link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
return true;
@@ -2430,7 +2615,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto async_event_process_exit;
netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
@@ -2603,7 +2788,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
u32 cons;
- int tx_pkts = 0;
int rx_pkts = 0;
u8 event = 0;
struct tx_cmp *txcmp;
@@ -2611,6 +2795,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 0;
cpr->had_work_done = 1;
while (1) {
+ u8 cmp_type;
int rc;
cons = RING_CMP(raw_cons);
@@ -2623,17 +2808,31 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
- tx_pkts++;
+ cmp_type = TX_CMP_TYPE(txcmp);
+ if (cmp_type == CMP_TYPE_TX_L2_CMP ||
+ cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
+ u32 opaque = txcmp->tx_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ u16 tx_freed;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ event |= BNXT_TX_CMP_EVENT;
+ if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
+ txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
+ else
+ txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
+ tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
+ bp->tx_ring_mask;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
+ if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
cpr->has_more_work = 1;
break;
}
- } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
+ cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
@@ -2650,12 +2849,9 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
- } else if (unlikely((TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_DONE) ||
- (TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
- (TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+ } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
bnxt_hwrm_handler(bp, txcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -2670,7 +2866,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
xdp_do_flush();
if (event & BNXT_TX_EVENT) {
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
u16 prod = txr->tx_prod;
/* Sync BD data before updating doorbell */
@@ -2680,7 +2876,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
cpr->cp_raw_cons = raw_cons;
- bnapi->tx_pkts += tx_pkts;
bnapi->events |= event;
return rx_pkts;
}
@@ -2688,7 +2883,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
int budget)
{
- if (bnapi->tx_pkts && !bnapi->tx_fault)
+ if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
bnapi->tx_int(bp, bnapi, budget);
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
@@ -2701,7 +2896,7 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
}
- bnapi->events = 0;
+ bnapi->events &= BNXT_TX_CMP_EVENT;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -2841,10 +3036,10 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i, work_done = 0;
- for (i = 0; i < 2; i++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ for (i = 0; i < cpr->cp_ring_count; i++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
- if (cpr2) {
+ if (cpr2->had_nqe_notify) {
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
@@ -2859,14 +3054,22 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i;
- for (i = 0; i < 2; i++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ for (i = 0; i < cpr->cp_ring_count; i++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
struct bnxt_db_info *db;
- if (cpr2 && cpr2->had_work_done) {
+ if (cpr2->had_work_done) {
+ u32 tgl = 0;
+
+ if (dbr_type == DBR_TYPE_CQ_ARMALL) {
+ cpr2->had_nqe_notify = 0;
+ tgl = cpr2->toggle;
+ }
db = &cpr2->cp_db;
- bnxt_writeq(bp, db->db_key64 | dbr_type |
- RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ bnxt_writeq(bp,
+ db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
+ DB_RING_IDX(db, cpr2->cp_raw_cons),
+ db->doorbell);
cpr2->had_work_done = 0;
}
}
@@ -2893,6 +3096,8 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
}
while (1) {
+ u16 type;
+
cons = RING_CMP(raw_cons);
nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
@@ -2914,15 +3119,21 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
*/
dma_rmb();
- if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
+ type = le16_to_cpu(nqcmp->type);
+ if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
struct bnxt_cp_ring_info *cpr2;
/* No more budget for RX work */
- if (budget && work_done >= budget && idx == BNXT_RX_HDL)
+ if (budget && work_done >= budget &&
+ cq_type == BNXT_NQ_HDL_TYPE_RX)
break;
- cpr2 = cpr->cp_ring_arr[idx];
+ idx = BNXT_NQ_HDL_IDX(idx);
+ cpr2 = &cpr->cp_ring_arr[idx];
+ cpr2->had_nqe_notify = 1;
+ cpr2->toggle = NQE_CN_TOGGLE(type);
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
@@ -2937,8 +3148,9 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
}
poll_done:
- cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
- if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
+ cpr_rx = &cpr->cp_ring_arr[0];
+ if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
+ (bp->flags & BNXT_FLAG_DIM)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3112,20 +3324,20 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
-static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
{
- u8 init_val = mem_init->init_val;
- u16 offset = mem_init->offset;
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
u8 *p2 = p;
int i;
if (!init_val)
return;
- if (offset == BNXT_MEM_INVALID_OFFSET) {
+ if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
memset(p, init_val, len);
return;
}
- for (i = 0; i < len; i += mem_init->size)
+ for (i = 0; i < len; i += ctxm->entry_size)
*(p2 + i + offset) = init_val;
}
@@ -3192,8 +3404,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->mem_init)
- bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ if (rmem->ctx_mem)
+ bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
@@ -3240,7 +3452,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
int i, j;
bp->max_tpa = MAX_TPA;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!bp->max_tpa_v2)
return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
@@ -3255,7 +3467,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (!rxr->rx_tpa)
return -ENOMEM;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
for (j = 0; j < bp->max_tpa; j++) {
agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
@@ -3313,6 +3525,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.pool_size += bp->rx_ring_size;
pp.nid = dev_to_node(&bp->pdev->dev);
pp.napi = &rxr->bnapi->napi;
+ pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
@@ -3410,6 +3623,15 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
}
}
+#define BNXT_TC_TO_RING_BASE(bp, tc) \
+ ((tc) * (bp)->tx_nr_rings_per_tc)
+
+#define BNXT_RING_TO_TC_OFF(bp, tx) \
+ ((tx) % (bp)->tx_nr_rings_per_tc)
+
+#define BNXT_RING_TO_TC(bp, tx) \
+ ((tx) / (bp)->tx_nr_rings_per_tc)
+
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
int i, j, rc;
@@ -3465,7 +3687,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
spin_lock_init(&txr->xdp_tx_lock);
if (i < bp->tx_nr_rings_xdp)
continue;
- if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+ if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
j++;
}
return 0;
@@ -3548,36 +3770,33 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
bnxt_free_ring(bp, &ring->ring_mem);
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ if (!cpr->cp_ring_arr)
+ continue;
+
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
- if (cpr2) {
- ring = &cpr2->cp_ring_struct;
- bnxt_free_ring(bp, &ring->ring_mem);
- bnxt_free_cp_arrays(cpr2);
- kfree(cpr2);
- cpr->cp_ring_arr[j] = NULL;
- }
+ ring = &cpr2->cp_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+ bnxt_free_cp_arrays(cpr2);
}
+ kfree(cpr->cp_ring_arr);
+ cpr->cp_ring_arr = NULL;
+ cpr->cp_ring_count = 0;
}
}
-static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
+static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring_mem_info *rmem;
struct bnxt_ring_struct *ring;
- struct bnxt_cp_ring_info *cpr;
int rc;
- cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
- if (!cpr)
- return NULL;
-
rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
if (rc) {
bnxt_free_cp_arrays(cpr);
- kfree(cpr);
- return NULL;
+ return -ENOMEM;
}
ring = &cpr->cp_ring_struct;
rmem = &ring->ring_mem;
@@ -3590,23 +3809,26 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
if (rc) {
bnxt_free_ring(bp, rmem);
bnxt_free_cp_arrays(cpr);
- kfree(cpr);
- cpr = NULL;
}
- return cpr;
+ return rc;
}
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
- int i, rc, ulp_base_vec, ulp_msix;
+ int i, j, rc, ulp_base_vec, ulp_msix;
+ int tcs = bp->num_tc;
+ if (!tcs)
+ tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
ulp_base_vec = bnxt_get_ulp_msix_base(bp);
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr;
+ struct bnxt_cp_ring_info *cpr, *cpr2;
struct bnxt_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
if (!bnapi)
continue;
@@ -3624,35 +3846,55 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
else
ring->map_idx = i;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
if (i < bp->rx_nr_rings) {
- struct bnxt_cp_ring_info *cpr2 =
- bnxt_alloc_cp_sub_ring(bp);
-
- cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
- if (!cpr2)
- return -ENOMEM;
- cpr2->bnapi = bnapi;
+ cp_count++;
+ rx = 1;
+ }
+ if (i < bp->tx_nr_rings_xdp) {
+ cp_count++;
+ tx = 1;
+ } else if ((sh && i < bp->tx_nr_rings) ||
+ (!sh && i >= bp->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
}
- if ((sh && i < bp->tx_nr_rings) ||
- (!sh && i >= bp->rx_nr_rings)) {
- struct bnxt_cp_ring_info *cpr2 =
- bnxt_alloc_cp_sub_ring(bp);
- cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
- if (!cpr2)
- return -ENOMEM;
+ cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!cpr->cp_ring_arr)
+ return -ENOMEM;
+ cpr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr2 = &cpr->cp_ring_arr[k];
+ rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
+ if (rc)
+ return rc;
cpr2->bnapi = bnapi;
+ cpr2->cp_idx = k;
+ if (!k && rx) {
+ bp->rx_ring[i].rx_cpr = cpr2;
+ cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
+ bp->tx_ring[n].tx_cpr = cpr2;
+ cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
+ }
}
+ if (tx)
+ j++;
}
return 0;
}
static void bnxt_init_ring_struct(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3697,18 +3939,16 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
rmem->vmem = (void **)&rxr->rx_agg_ring;
skip_rx:
- txr = bnapi->tx_ring;
- if (!txr)
- continue;
-
- ring = &txr->tx_ring_struct;
- rmem = &ring->ring_mem;
- rmem->nr_pages = bp->tx_nr_pages;
- rmem->page_size = HW_RXBD_RING_SIZE;
- rmem->pg_arr = (void **)txr->tx_desc_ring;
- rmem->dma_arr = txr->tx_desc_mapping;
- rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
- rmem->vmem = (void **)&txr->tx_buf_ring;
+ bnxt_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
}
}
@@ -3799,6 +4039,9 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
bpf_prog_add(bp->xdp_prog, 1);
rxr->xdp_prog = bp->xdp_prog;
@@ -3829,11 +4072,10 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
ring->fw_ring_id = INVALID_HW_RING_ID;
cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
-
- if (!cpr2)
- continue;
+ if (!cpr->cp_ring_arr)
+ continue;
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
ring = &cpr2->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
@@ -3876,6 +4118,11 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (i >= bp->tx_nr_rings_xdp)
+ netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
+ NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
}
return 0;
@@ -3921,7 +4168,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
+ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
num_vnics += bp->rx_nr_rings;
#endif
@@ -3952,13 +4199,22 @@ static void bnxt_init_vnics(struct bnxt *bp)
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (i == 0)
+ if (!i) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ bp->toeplitz_prefix = 0;
get_random_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
- else
+ for (k = 0; k < 8; k++) {
+ bp->toeplitz_prefix <<= 8;
+ bp->toeplitz_prefix |= key[k];
+ }
+ } else {
memcpy(vnic->rss_hash_key,
bp->vnic_info[0].rss_hash_key,
HW_HASH_KEY_SIZE);
+ }
}
}
}
@@ -4194,7 +4450,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
}
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_skip_grps;
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -4208,13 +4464,13 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
goto out;
}
vnic_skip_grps:
- if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
!(vnic->flags & BNXT_VNIC_RSS_FLAG))
continue;
/* Allocate rss table and hash key */
size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
@@ -4324,7 +4580,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
- !(bp->flags & BNXT_FLAG_CHIP_P5))
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
@@ -4362,7 +4618,7 @@ static void bnxt_init_stats(struct bnxt *bp)
stats = &cpr->stats;
rc = bnxt_hwrm_func_qstat_ext(bp, stats);
if (rc) {
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
mask = (1ULL << 48) - 1;
else
mask = -1ULL;
@@ -4508,7 +4764,7 @@ alloc_tx_ext_stats:
static void bnxt_clear_ring_indices(struct bnxt *bp)
{
- int i;
+ int i, j;
if (!bp->bnapi)
return;
@@ -4525,10 +4781,10 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
cpr = &bnapi->cp_ring;
cpr->cp_raw_cons = 0;
- txr = bnapi->tx_ring;
- if (txr) {
+ bnxt_for_each_napi_tx(j, bnapi, txr) {
txr->tx_prod = 0;
txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
}
rxr = bnapi->rx_ring;
@@ -4538,12 +4794,12 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
rxr->rx_sw_agg_prod = 0;
rxr->rx_next_cons = 0;
}
+ bnapi->events = 0;
}
}
-static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
-#ifdef CONFIG_RFS_ACCEL
int i;
/* Under rtnl_lock and all our NAPIs have been disabled. It's
@@ -4555,40 +4811,73 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
struct bnxt_ntuple_filter *fltr;
head = &bp->ntp_fltr_hash_tbl[i];
- hlist_for_each_entry_safe(fltr, tmp, head, hash) {
- hlist_del(&fltr->hash);
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+ bnxt_del_l2_filter(bp, fltr->l2_fltr);
+ if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ continue;
+ hlist_del(&fltr->base.hash);
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
kfree(fltr);
}
}
- if (irq_reinit) {
- bitmap_free(bp->ntp_fltr_bmap);
- bp->ntp_fltr_bmap = NULL;
- }
+ if (!all)
+ return;
+
+ bitmap_free(bp->ntp_fltr_bmap);
+ bp->ntp_fltr_bmap = NULL;
bp->ntp_fltr_count = 0;
-#endif
}
static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
- if (!(bp->flags & BNXT_FLAG_RFS))
+ if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
return 0;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
return rc;
-#else
- return 0;
-#endif
+}
+
+static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
+{
+ int i;
+
+ for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_l2_filter *fltr;
+
+ head = &bp->l2_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+ if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ continue;
+ hlist_del(&fltr->base.hash);
+ if (fltr->base.flags) {
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+ }
+ }
+}
+
+static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
}
static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
@@ -4598,7 +4887,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_rx_rings(bp);
bnxt_free_cp_rings(bp);
bnxt_free_all_cp_arrays(bp);
- bnxt_free_ntp_fltrs(bp, irq_re_init);
+ bnxt_free_ntp_fltrs(bp, false);
+ bnxt_free_l2_filters(bp, false);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
@@ -4641,7 +4931,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr =
&bp->bnapi[i]->cp_ring;
@@ -4659,11 +4949,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
rxr->rx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
rxr->rx_agg_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
+ } else {
+ rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
}
rxr->bnapi = bp->bnapi[i];
bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
@@ -4686,22 +4978,33 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
else
j = bp->rx_nr_rings;
- for (i = 0; i < bp->tx_nr_rings; i++, j++) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ struct bnxt_napi *bnapi2;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
txr->tx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
- txr->bnapi = bp->bnapi[j];
- bp->bnapi[j]->tx_ring = txr;
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
if (i >= bp->tx_nr_rings_xdp) {
+ int k = j + BNXT_RING_TO_TC_OFF(bp, i);
+
+ bnapi2 = bp->bnapi[k];
txr->txq_index = i - bp->tx_nr_rings_xdp;
- bp->bnapi[j]->tx_int = bnxt_tx_int;
+ txr->tx_napi_idx =
+ BNXT_RING_TO_TC(bp, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ bnapi2->tx_int = bnxt_tx_int;
} else {
- bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
- bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+ bnapi2 = bp->bnapi[j];
+ bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
+ bnapi2->tx_ring[0] = txr;
+ bnapi2->tx_int = bnxt_tx_int_xdp;
+ j++;
}
+ txr->bnapi = bnapi2;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ txr->tx_cpr = &bnapi2->cp_ring;
}
rc = bnxt_alloc_stats(bp);
@@ -4913,6 +5216,8 @@ int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
return hwrm_req_send(bp, req);
}

+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
+
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
struct hwrm_tunnel_dst_port_free_input *req;
@@ -4942,6 +5247,11 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
bp->nge_port = 0;
bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
+ req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
+ bp->vxlan_gpe_port = 0;
+ bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
+ break;
default:
break;
}
@@ -4950,6 +5260,8 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
if (rc)
netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
rc);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
return rc;
}
@@ -4985,9 +5297,16 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
bp->nge_port = port;
bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
+ case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
+ bp->vxlan_gpe_port = port;
+ bp->vxlan_gpe_fw_dst_port_id =
+ le16_to_cpu(resp->tunnel_dst_port_id);
+ break;
default:
break;
}
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
err_out:
hwrm_req_drop(bp, req);
@@ -5013,25 +5332,301 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return hwrm_req_send_silent(bp, req);
}

+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ if (!atomic_dec_and_test(&fltr->refcnt))
+ return;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return;
+ }
+ hlist_del_rcu(&fltr->base.hash);
+ if (fltr->base.flags) {
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ kfree_rcu(fltr, base.rcu);
+}
+
+static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u32 idx)
+{
+ struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
+ struct bnxt_l2_filter *fltr;
+
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnxt_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u32 idx)
+{
+ struct bnxt_l2_filter *fltr = NULL;
+
+ rcu_read_lock();
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr)
+ atomic_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
+#define BNXT_IPV4_4TUPLE(bp, fkeys) \
+ (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
+ ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
+
+#define BNXT_IPV6_4TUPLE(bp, fkeys) \
+ (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
+ ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
+
+static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
+{
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ if (BNXT_IPV4_4TUPLE(bp, fkeys))
+ return sizeof(fkeys->addrs.v4addrs) +
+ sizeof(fkeys->ports);
+
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
+ return sizeof(fkeys->addrs.v4addrs);
+ }
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (BNXT_IPV6_4TUPLE(bp, fkeys))
+ return sizeof(fkeys->addrs.v6addrs) +
+ sizeof(fkeys->ports);
+
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
+ return sizeof(fkeys->addrs.v6addrs);
+ }
+
+ return 0;
+}
+
+static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
+ const unsigned char *key)
+{
+ u64 prefix = bp->toeplitz_prefix, hash = 0;
+ struct bnxt_ipv4_tuple tuple4;
+ struct bnxt_ipv6_tuple tuple6;
+ int i, j, len = 0;
+ u8 *four_tuple;
+
+ len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
+ if (!len)
+ return 0;
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuple4.v4addrs = fkeys->addrs.v4addrs;
+ tuple4.ports = fkeys->ports;
+ four_tuple = (unsigned char *)&tuple4;
+ } else {
+ tuple6.v6addrs = fkeys->addrs.v6addrs;
+ tuple6.ports = fkeys->ports;
+ four_tuple = (unsigned char *)&tuple6;
+ }
+
+ for (i = 0, j = 8; i < len; i++, j++) {
+ u8 byte = four_tuple[i];
+ int bit;
+
+ for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
+ if (byte & 0x80)
+ hash ^= prefix;
+ }
+ prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
+ }
+
+ /* The valid part of the hash is in the upper 32 bits. */
+ return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
+}
+
#ifdef CONFIG_RFS_ACCEL
-static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
- struct bnxt_ntuple_filter *fltr)
+static struct bnxt_l2_filter *
+bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ fltr = bnxt_lookup_l2_filter(bp, key, idx);
+ return fltr;
+}
+#endif
+
+static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
+ struct bnxt_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNXT_FLTR_TYPE_L2;
+ if (fltr->base.flags) {
+ int bit_id;
+
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+ BNXT_MAX_FLTR, 0);
+ if (bit_id < 0)
+ return -ENOMEM;
+ fltr->base.sw_id = (u16)bit_id;
+ }
+ head = &bp->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ atomic_set(&fltr->refcnt, 1);
+ return 0;
+}
+
+static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ fltr = bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ if (rc) {
+ bnxt_del_l2_filter(bp, fltr);
+ fltr = ERR_PTR(rc);
+ }
+ return fltr;
+}
+
+static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &pf->vf[vf_idx];
+
+ return vf->fw_fid;
+#else
+ return INVALID_HW_RING_ID;
+#endif
+}
+
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ u16 target_id = 0xffff;
+ int rc;
+
+ if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (fltr->base.vf_idx >= pf->active_vfs)
+ return -EINVAL;
+
+ target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+ if (target_id == INVALID_HW_RING_ID)
+ return -EINVAL;
+ }
+
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->target_id = cpu_to_le16(target_id);
+ req->l2_filter_id = fltr->base.filter_id;
+ return hwrm_req_send(bp, req);
+}
+
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ u16 target_id = 0xffff;
+ int rc;
+
+ if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (fltr->base.vf_idx >= pf->active_vfs)
+ return -EINVAL;
+
+ target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+ }
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->target_id = cpu_to_le16(target_id);
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ req->flags |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ fltr->base.filter_id = resp->l2_filter_id;
+ set_bit(BNXT_FLTR_VALID, &fltr->base.state);
+ }
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_free_input *req;
int rc;
+ set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
if (rc)
return rc;
- req->ntuple_filter_id = fltr->filter_id;
+ req->ntuple_filter_id = fltr->base.filter_id;
return hwrm_req_send(bp, req);
}

#define BNXT_NTP_FLTR_FLAGS \
(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
- CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
@@ -5047,12 +5642,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
#define BNXT_NTP_TUNNEL_FLTR_FLAG \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
-static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
- struct bnxt_ntuple_filter *fltr)
+void bnxt_fill_ipv6_mask(__be32 mask[4])
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ mask[i] = cpu_to_be32(~0);
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
u32 flags = 0;
int rc;
@@ -5061,42 +5665,47 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
if (rc)
return rc;
- req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
+ l2_fltr = fltr->l2_fltr;
+ req->l2_filter_id = l2_fltr->base.filter_id;
+
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->rxq);
+ req->dst_id = cpu_to_le16(fltr->base.rxq);
} else {
- vnic = &bp->vnic_info[fltr->rxq + 1];
+ vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
req->flags = cpu_to_le32(flags);
req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
- memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req->ip_protocol = keys->basic.ip_proto;
if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
- int i;
-
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- for (i = 0; i < 4; i++) {
- req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
- req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ *(struct in6_addr *)&req->src_ipaddr[0] =
+ keys->addrs.v6addrs.src;
+ bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ *(struct in6_addr *)&req->dst_ipaddr[0] =
+ keys->addrs.v6addrs.dst;
+ bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
}
} else {
- req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -5104,80 +5713,85 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
- req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ req->src_port = keys->ports.src;
+ req->src_port_mask = cpu_to_be16(0xffff);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = cpu_to_be16(0xffff);
+ }
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- fltr->filter_id = resp->ntuple_filter_id;
+ fltr->base.filter_id = resp->ntuple_filter_id;
hwrm_req_drop(bp, req);
return rc;
}
-#endif

static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
const u8 *mac_addr)
{
- struct hwrm_cfa_l2_filter_alloc_output *resp;
- struct hwrm_cfa_l2_filter_alloc_input *req;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
int rc;
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
- if (rc)
- return rc;
-
- req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
- req->flags |=
- cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
- req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
- req->enables =
- cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
- memcpy(req->l2_addr, mac_addr, ETH_ALEN);
- req->l2_addr_mask[0] = 0xff;
- req->l2_addr_mask[1] = 0xff;
- req->l2_addr_mask[2] = 0xff;
- req->l2_addr_mask[3] = 0xff;
- req->l2_addr_mask[4] = 0xff;
- req->l2_addr_mask[5] = 0xff;
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
- resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
- if (!rc)
- bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
- resp->l2_filter_id;
- hwrm_req_drop(bp, req);
+ fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
return rc;
}

-static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
- struct hwrm_cfa_l2_filter_free_input *req;
u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
- int rc;
/* Any associated ntuple filters will also be cleared by firmware. */
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
- if (rc)
- return rc;
- hwrm_req_hold(bp, req);
for (i = 0; i < num_of_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
for (j = 0; j < vnic->uc_filter_count; j++) {
- req->l2_filter_id = vnic->fw_l2_filter_id[j];
+ struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
- rc = hwrm_req_send(bp, req);
+ bnxt_hwrm_l2_filter_free(bp, fltr);
+ bnxt_del_l2_filter(bp, fltr);
}
vnic->uc_filter_count = 0;
}
- hwrm_req_drop(bp, req);
- return rc;
+}
+
+#define BNXT_DFLT_TUNL_TPA_BMAP \
+ (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
+ struct hwrm_vnic_tpa_cfg_input *req)
+{
+ u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
+ return;
+
+ if (bp->vxlan_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
+ if (bp->vxlan_gpe_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+ if (bp->nge_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
+
+ req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+ req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}

static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
@@ -5226,7 +5840,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
segs = MAX_TPA_SEGS_P5;
max_aggs = bp->max_tpa;
} else {
@@ -5236,6 +5850,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
req->max_aggs = cpu_to_le16(max_aggs);
req->min_agg_len = cpu_to_le32(512);
+ bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
}
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -5252,35 +5867,25 @@ static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = rxr->bnapi;
- struct bnxt_cp_ring_info *cpr;
-
- cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
- return cpr->cp_ring_struct.fw_ring_id;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
+ else
return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
- }
}

static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = txr->bnapi;
- struct bnxt_cp_ring_info *cpr;
-
- cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
- return cpr->cp_ring_struct.fw_ring_id;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return txr->tx_cpr->cp_ring_struct.fw_ring_id;
+ else
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
- }
}

static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{
int entries;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
else
entries = HW_HASH_INDEX_SIZE;
@@ -5330,8 +5935,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ if (!rx_rings)
+ return 0;
+ return bnxt_calc_nr_ring_pages(rx_rings - 1,
+ BNXT_RSS_TABLE_ENTRIES_P5);
+ }
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2;
return 1;
@@ -5376,7 +5985,7 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_fill_hw_rss_tbl_p5(bp, vnic);
else
bnxt_fill_hw_rss_tbl(bp, vnic);
@@ -5401,7 +6010,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
struct hwrm_vnic_rss_cfg_input *req;
int rc;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
@@ -5566,7 +6175,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
req->default_rx_ring_id =
@@ -5666,7 +6275,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_no_ring_grps;
/* map ring groups to this vnic */
@@ -5701,7 +6310,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
int rc;
bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
- bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
+ bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
+ bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
if (bp->hwrm_spec_code < 0x10600)
return 0;
@@ -5714,9 +6324,9 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (!rc) {
u32 flags = le32_to_cpu(resp->flags);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
- bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+ bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
@@ -5725,18 +6335,22 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
* VLAN_STRIP_CAP properly.
*/
if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
- (BNXT_CHIP_P5_THOR(bp) &&
+ (BNXT_CHIP_P5(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
- bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
+ bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2) {
- if (BNXT_CHIP_P5_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
else
- bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
}
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
}
hwrm_req_drop(bp, req);
return rc;
@@ -5749,7 +6363,7 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
int rc;
u16 i;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
@@ -5782,7 +6396,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
struct hwrm_ring_grp_free_input *req;
u16 i;
- if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return;
if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
@@ -5842,12 +6456,15 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->length = cpu_to_le32(bp->tx_ring_mask + 1);
req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req->queue_id = cpu_to_le16(ring->queue_id);
+ if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
+ req->cmpl_coal_cnt =
+ RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
break;
}
case HWRM_RING_ALLOC_RX:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 flags = 0;
/* Association of rx ring with stats context */
@@ -5862,7 +6479,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
}
break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
/* Association of agg ring with rx ring */
grp_info = &bp->grp_info[ring->grp_idx];
@@ -5880,7 +6497,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
/* Association of cp ring with nq */
grp_info = &bp->grp_info[map_index];
req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
@@ -5948,14 +6565,34 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
}
}

+static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bp->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bp->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bp->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bp->cp_ring_mask;
+ break;
+ }
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+ }
+}
+
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
u32 map_idx, u32 xid)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
- else
- db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -5972,6 +6609,11 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break;
}
db->db_key64 |= (u64)xid << DBR_XID_SFT;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ db->db_key64 |= DBR_VALID;
+
+ db->doorbell = bp->bar1 + bp->db_offset;
} else {
db->doorbell = bp->bar1 + map_idx * 0x80;
switch (ring_type) {
@@ -5987,6 +6629,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break;
}
}
+ bnxt_set_db_mask(bp, db, ring_type);
}

static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
@@ -5995,7 +6638,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
int i, rc = 0;
u32 type;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = HWRM_RING_ALLOC_NQ;
else
type = HWRM_RING_ALLOC_CMPL;
@@ -6031,15 +6674,13 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_ring_struct *ring;
u32 map_idx;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
struct bnxt_napi *bnapi = txr->bnapi;
- struct bnxt_cp_ring_info *cpr, *cpr2;
u32 type2 = HWRM_RING_ALLOC_CMPL;
- cpr = &bnapi->cp_ring;
- cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_TX_HDL;
+ ring->handle = BNXT_SET_NQ_HDL(cpr2);
map_idx = bnapi->index;
rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
if (rc)
@@ -6071,14 +6712,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_cp_ring_info *cpr2;
- cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_RX_HDL;
+ ring->handle = BNXT_SET_NQ_HDL(cpr2);
rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
if (rc)
goto err_out;
@@ -6186,7 +6825,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_RX_AGG;
else
type = RING_FREE_REQ_RING_TYPE_RX;
@@ -6213,7 +6852,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
*/
bnxt_disable_int_sync(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_NQ;
else
type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
@@ -6223,18 +6862,16 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
- if (cpr2) {
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
+ ring = &cpr2->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ continue;
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
}
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
@@ -6246,6 +6883,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}

+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
bool shared);
@@ -6282,14 +6921,16 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
int rx = hw_resc->resv_rx_rings;
int tx = hw_resc->resv_tx_rings;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx >>= 1;
if (cp < (rx + tx)) {
- bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
hw_resc->resv_rx_rings = rx;
@@ -6301,8 +6942,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
+get_rings_exit:
hwrm_req_drop(bp, req);
- return 0;
+ return rc;
}

int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -6346,7 +6988,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
enables |= tx_rings + ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
@@ -6362,16 +7004,17 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs =
- cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
bnxt_rfs_supported(bp))
req->num_rsscos_ctxs =
cpu_to_le16(ring_grps + 1);
@@ -6397,7 +7040,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
enables |= tx_rings + ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
@@ -6412,9 +7055,11 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -6508,7 +7153,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
int cp;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return bnxt_nq_rings_in_use(bp);
cp = bp->tx_nr_rings + bp->rx_nr_rings;
@@ -6565,7 +7210,8 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
bnxt_check_rss_tbl_no_rmgr(bp);
return false;
}
- if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -6573,9 +7219,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
- !(bp->flags & BNXT_FLAG_CHIP_P5)))
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
return true;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq)
return true;
return false;
@@ -6590,13 +7236,15 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
int grp, rx_rings, rc;
int vnic = 1, stat;
bool sh = false;
+ int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -6639,7 +7287,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx = rx_rings << 1;
- cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, tx);
+ cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
bp->tx_nr_rings = tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
@@ -6686,7 +7335,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req->flags = cpu_to_le32(flags);
@@ -6708,7 +7357,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
else
@@ -6902,10 +7551,40 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
return hwrm_req_send(bp, req_rx);
}

+static int
+bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
+
+ req->ring_id = cpu_to_le16(ring_id);
+ return hwrm_req_send(bp, req);
+}
+
+static int
+bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ struct bnxt_tx_ring_info *txr;
+ int i, rc;
+
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ u16 ring_id;
+
+ ring_id = bnxt_cp_ring_for_tx(bp, txr);
+ req->ring_id = cpu_to_le16(ring_id);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ return rc;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ }
+ return 0;
+}
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
- *req;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
int i, rc;
rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
@@ -6926,29 +7605,19 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_coal *hw_coal;
- u16 ring_id;
-
- req = req_rx;
- if (!bnapi->rx_ring) {
- ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req = req_tx;
- } else {
- ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
- }
- req->ring_id = cpu_to_le16(ring_id);
- rc = hwrm_req_send(bp, req);
+ if (!bnapi->rx_ring)
+ rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
+ else
+ rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
if (rc)
break;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
- if (bnapi->rx_ring && bnapi->tx_ring) {
- req = req_tx;
- ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req->ring_id = cpu_to_le16(ring_id);
- rc = hwrm_req_send(bp, req);
+ if (bnapi->rx_ring && bnapi->tx_ring[0]) {
+ rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
if (rc)
break;
}
@@ -7044,7 +7713,6 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_output *resp;
struct hwrm_func_qcfg_input *req;
- u32 min_db_offset = 0;
u16 flags;
int rc;
@@ -7102,16 +7770,17 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (bp->db_size)
goto func_qcfg_exit;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ if (BNXT_CHIP_P5(bp)) {
if (BNXT_PF(bp))
- min_db_offset = DB_PF_OFFSET_P5;
+ bp->db_offset = DB_PF_OFFSET_P5;
else
- min_db_offset = DB_VF_OFFSET_P5;
+ bp->db_offset = DB_VF_OFFSET_P5;
}
bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
1024);
if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
- bp->db_size <= min_db_offset)
+ bp->db_size <= bp->db_offset)
bp->db_size = pci_resource_len(bp->pdev, 2);
func_qcfg_exit:
@@ -7119,37 +7788,99 @@ func_qcfg_exit:
return rc;
}

-static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
- struct hwrm_func_backing_store_qcaps_output *resp)
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
{
- struct bnxt_mem_init *mem_init;
- u16 init_mask;
- u8 init_val;
- u8 *offset;
- int i;
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
- init_val = resp->ctx_kind_initializer;
- init_mask = le16_to_cpu(resp->ctx_init_mask);
- offset = &resp->qp_init_offset;
- mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
- for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
- mem_init->init_val = init_val;
- mem_init->offset = BNXT_MEM_INVALID_OFFSET;
- if (!init_mask)
+ if (!ctxm->max_entries)
continue;
- if (i == BNXT_CTX_MEM_INIT_STAT)
- offset = &resp->stat_init_offset;
- if (init_mask & (1 << i))
- mem_init->offset = *offset * 4;
- else
- mem_init->init_val = 0;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
}
- ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+ return 0;
+}
+
+#define BNXT_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnxt_ctx_mem_info *ctx;
+ u16 type;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bp->ctx = ctx;
+
+ resp = hwrm_req_hold(bp, req);
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; ) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnxt_init_ctx_initializer(ctxm, init_val, init_off,
+ BNXT_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNXT_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
+
+ctx_done:
+ hwrm_req_drop(bp, req);
+ return rc;
}

static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
@@ -7161,6 +7892,9 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
return 0;
+ if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
+
rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
if (rc)
return rc;
@@ -7168,48 +7902,84 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc) {
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- int i, tqm_rings;
+ u8 init_val, init_idx = 0;
+ u16 init_mask;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = bp->ctx;
if (!ctx) {
- rc = -ENOMEM;
- goto ctx_err;
- }
- ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
- ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
- ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
- ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
- ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
- ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
- ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
- ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
- ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
- ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
- ctx->vnic_max_vnic_entries =
- le16_to_cpu(resp->vnic_max_vnic_entries);
- ctx->vnic_max_ring_table_entries =
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ bp->ctx = ctx;
+ }
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
+ ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+ ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+ ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
+ ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
+ ctxm->max_entries = ctxm->vnic_entries +
le16_to_cpu(resp->vnic_max_ring_table_entries);
- ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
- ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
- ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
- ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
- ctx->tqm_min_entries_per_ring =
- le32_to_cpu(resp->tqm_min_entries_per_ring);
- ctx->tqm_max_entries_per_ring =
- le32_to_cpu(resp->tqm_max_entries_per_ring);
- ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
- if (!ctx->tqm_entries_multiple)
- ctx->tqm_entries_multiple = 1;
- ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
- ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
- ctx->mrav_num_entries_units =
+ ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->vnic_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->stat_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
+ ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
+ ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
+ ctxm->entry_multiple = resp->tqm_entries_multiple;
+ if (!ctxm->entry_multiple)
+ ctxm->entry_multiple = 1;
+
+ memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctxm->mrav_num_entries_units =
le16_to_cpu(resp->mrav_num_entries_units);
- ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
- ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->mrav_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
- bnxt_init_ctx_initializer(ctx, resp);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
+ ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
@@ -7217,16 +7987,11 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
- ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < tqm_rings; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
- bp->ctx = ctx;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
+ ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
+
+ rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
} else {
rc = 0;
}
@@ -7265,6 +8030,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct hwrm_func_backing_store_cfg_input *req;
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
void **__req = (void **)&req;
u32 req_len = sizeof(*req);
__le32 *num_entries;
@@ -7286,82 +8052,102 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
req->enables = cpu_to_le32(enables);
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
- ctx_pg = &ctx->qp_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctx_pg = ctxm->pg_info;
req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
- req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
- req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
- req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+ req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
+ req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
+ req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->qpc_pg_size_qpc_lvl,
&req->qpc_page_dir);
+
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
+ req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
- ctx_pg = &ctx->srq_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctx_pg = ctxm->pg_info;
req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
- req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
- req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+ req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
+ req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->srq_pg_size_srq_lvl,
&req->srq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
- ctx_pg = &ctx->cq_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctx_pg = ctxm->pg_info;
req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
- req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
- req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+ req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
+ req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->cq_pg_size_cq_lvl,
&req->cq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
- ctx_pg = &ctx->vnic_mem;
- req->vnic_num_vnic_entries =
- cpu_to_le16(ctx->vnic_max_vnic_entries);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctx_pg = ctxm->pg_info;
+ req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
req->vnic_num_ring_table_entries =
- cpu_to_le16(ctx->vnic_max_ring_table_entries);
- req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+ cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
+ req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->vnic_pg_size_vnic_lvl,
&req->vnic_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
- ctx_pg = &ctx->stat_mem;
- req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
- req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctx_pg = ctxm->pg_info;
+ req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
+ req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->stat_pg_size_stat_lvl,
&req->stat_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
- ctx_pg = &ctx->mrav_mem;
+ u32 units;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctx_pg = ctxm->pg_info;
req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
- if (ctx->mrav_num_entries_units)
- flags |=
- FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
- req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+ units = ctxm->mrav_num_entries_units;
+ if (units) {
+ u32 num_mr, num_ah = ctxm->mrav_av_entries;
+ u32 entries;
+
+ num_mr = ctx_pg->entries - num_ah;
+ entries = ((num_mr / units) << 16) | (num_ah / units);
+ req->mrav_num_entries = cpu_to_le32(entries);
+ flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
+ }
+ req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->mrav_pg_size_mrav_lvl,
&req->mrav_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
- ctx_pg = &ctx->tim_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctx_pg = ctxm->pg_info;
req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
- req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+ req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->tim_pg_size_tim_lvl,
&req->tim_page_dir);
}
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
for (i = 0, num_entries = &req->tqm_sp_num_entries,
pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req->tqm_sp_page_dir,
- ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
+ ctx_pg = ctxm->pg_info;
i < BNXT_MAX_TQM_RINGS;
+ ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
- req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
- ctx_pg = ctx->tqm_mem[i];
+ req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
*num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
@@ -7385,7 +8171,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, struct bnxt_mem_init *mem_init)
+ u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -7423,7 +8209,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- rmem->mem_init = mem_init;
+ rmem->ctx_mem = ctxm;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7438,7 +8224,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- rmem->mem_init = mem_init;
+ rmem->ctx_mem = ctxm;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -7473,41 +8259,144 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
ctx_pg->nr_pages = 0;
}

+static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ hwrm_req_hold(bp, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags =
+ cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
+ rc = hwrm_req_send(bp, req);
+ }
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_mem_type *ctxm;
+ u16 last_type;
+ int rc = 0;
+ u16 type;
+
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNXT_CTX_MAX - 1;
+ else
+ last_type = BNXT_CTX_L2_MAX - 1;
+ ctx->ctx_arr[last_type].last = 1;
+
+ for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
- int i;
+ u16 type;
if (!ctx)
return;
- if (ctx->tqm_mem[0]) {
- for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
- bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
- kfree(ctx->tqm_mem[0]);
- ctx->tqm_mem[0] = NULL;
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
}
- bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+ kfree(ctx);
+ bp->ctx = NULL;
}
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- struct bnxt_mem_init *init;
- u32 mem_size, ena, entries;
- u32 entries_sp, min;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
u8 pg_lvl = 1;
int i, rc;
@@ -7521,120 +8410,98 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = 65536;
- extra_srqs = 8192;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp destroy feature */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
- ctx_pg = &ctx->qp_mem;
- ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
- extra_qps;
- if (ctx->qp_entry_size) {
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->srq_mem;
- ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- if (ctx->srq_entry_size) {
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->cq_mem;
- ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- if (ctx->cq_entry_size) {
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->vnic_mem;
- ctx_pg->entries = ctx->vnic_max_vnic_entries +
- ctx->vnic_max_ring_table_entries;
- if (ctx->vnic_entry_size) {
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->stat_mem;
- ctx_pg->entries = ctx->stat_max_entries;
- if (ctx->stat_entry_size) {
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
- ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
goto skip_rdma;
- ctx_pg = &ctx->mrav_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
/* 128K extra is needed to accommodate static AH context
* allocation by f/w.
*/
- num_mr = 1024 * 256;
- num_ah = 1024 * 128;
- ctx_pg->entries = num_mr + num_ah;
- if (ctx->mrav_entry_size) {
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
- if (rc)
- return rc;
- }
- ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
- if (ctx->mrav_num_entries_units)
- ctx_pg->entries =
- ((num_mr / ctx->mrav_num_entries_units) << 16) |
- (num_ah / ctx->mrav_num_entries_units);
-
- ctx_pg = &ctx->tim_mem;
- ctx_pg->entries = ctx->qp_mem.entries;
- if (ctx->tim_entry_size) {
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
- if (rc)
- return rc;
- }
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- min = ctx->tqm_min_entries_per_ring;
- entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
- 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
- entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
- entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
- entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
- for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
- ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = i ? entries : entries_sp;
- if (ctx->tqm_entry_size) {
- mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
- NULL);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
- }
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
- rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+
+ if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ rc = bnxt_backing_store_cfg_v2(bp, ena);
+ else
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc) {
netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
rc);
@@ -7682,7 +8549,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 max_msix = le16_to_cpu(resp->max_msix);
hw_resc->max_nqs = max_msix;
@@ -7711,7 +8578,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -7743,7 +8610,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
- } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
@@ -7815,10 +8682,16 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
+ bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
flags_ext2 = le32_to_cpu(resp->flags_ext2);
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
+ bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -8450,7 +9323,7 @@ static void bnxt_accumulate_all_stats(struct bnxt *bp)
int i;
/* Chip bug. Counter intermittently becomes 0. */
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ignore_zero = true;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8644,7 +9517,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
return;
bnxt_hwrm_clear_vnic_filter(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
/* clear all RSS settings before freeing vnic ctx */
bnxt_hwrm_clear_vnic_rss(bp);
bnxt_hwrm_vnic_ctx_free(bp);
@@ -8653,7 +9526,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, false);
bnxt_hwrm_vnic_free(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_hwrm_vnic_ctx_free(bp);
}
@@ -8810,7 +9683,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return __bnxt_setup_vnic_p5(bp, vnic_id);
else
return __bnxt_setup_vnic(bp, vnic_id);
@@ -8818,10 +9691,9 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -8834,7 +9706,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic = &bp->vnic_info[vnic_id];
vnic->flags |= BNXT_VNIC_RFS_FLAG;
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
if (rc) {
@@ -8847,9 +9719,6 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
break;
}
return rc;
-#else
- return 0;
-#endif
}
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
@@ -8928,7 +9797,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bnxt_hwrm_update_rss_hash_cfg(bp);
if (bp->flags & BNXT_FLAG_RFS) {
@@ -9046,8 +9915,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp)
return rc;
}
-static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
- bool shared)
+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared)
{
int _rx = *rx, _tx = *tx;
@@ -9070,19 +9939,59 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
return 0;
}
+static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
+{
+ return (tx - tx_xdp) / tx_sets + tx_xdp;
+}
+
+int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
+{
+ int tcs = bp->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+ return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
+}
+
+static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
+{
+ int tcs = bp->num_tc;
+
+ return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
+ bp->tx_nr_rings_xdp;
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool sh)
+{
+ int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
+
+ if (tx_cp != *tx) {
+ int tx_saved = tx_cp, rc;
+
+ rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
+ if (rc)
+ return rc;
+ if (tx_cp != tx_saved)
+ *tx = bnxt_num_cp_to_tx(bp, tx_cp);
+ return 0;
+ }
+ return __bnxt_trim_rings(bp, rx, tx, max, sh);
+}
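
Since several TX rings can now share one completion ring, the TX<->CP conversion above is pure arithmetic: XDP TX rings stay 1:1 with completion rings and the remaining TX rings are divided by the number of TC sets. A worked example with hypothetical ring counts (4 TCs, 2 XDP rings):

    #include <assert.h>

    /* Mirrors __bnxt_num_tx_to_cp() and bnxt_num_cp_to_tx(), for illustration. */
    static int tx_to_cp(int tx, int tx_sets, int tx_xdp)
    {
            return (tx - tx_xdp) / tx_sets + tx_xdp;
    }

    static int cp_to_tx(int tx_cp, int tx_sets, int tx_xdp)
    {
            return (tx_cp - tx_xdp) * tx_sets + tx_xdp;
    }

    int main(void)
    {
            assert(tx_to_cp(18, 4, 2) == 6);    /* 18 TX rings need 6 CP rings */
            assert(cp_to_tx(6, 4, 2) == 18);    /* and the mapping is invertible */
            return 0;
    }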
+
static void bnxt_setup_msix(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
struct net_device *dev = bp->dev;
int tcs, i;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
if (tcs) {
int i, off, count;
for (i = 0; i < tcs; i++) {
count = bp->tx_nr_rings_per_tc;
- off = i * count;
+ off = BNXT_TC_TO_RING_BASE(bp, i);
netdev_set_tc_queue(dev, i, count, off);
}
}
@@ -9108,8 +10017,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
- if (netdev_get_num_tc(bp->dev))
+ if (bp->num_tc) {
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
+ }
snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
@@ -9137,7 +10048,6 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
return bp->hw_resc.max_rsscos_ctxs;
@@ -9147,7 +10057,6 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
return bp->hw_resc.max_vnics;
}
-#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
@@ -9163,7 +10072,7 @@ static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
unsigned int cp = bp->hw_resc.max_cp_rings;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
cp -= bnxt_get_ulp_msix_num(bp);
return cp;
@@ -9173,7 +10082,7 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
@@ -9189,7 +10098,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
unsigned int cp;
cp = bnxt_get_max_func_cp_rings_for_en(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return cp - bp->rx_nr_rings - bp->tx_nr_rings;
else
return cp - bp->cp_nr_rings;
@@ -9208,7 +10117,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
int max_idx, avail_msix;
max_idx = bp->total_irqs;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
max_idx = min_t(int, bp->total_irqs, max_cp);
avail_msix = max_idx - bp->cp_nr_rings;
if (!BNXT_NEW_RM(bp) || avail_msix >= num)
@@ -9232,7 +10141,7 @@ static int bnxt_get_num_msix(struct bnxt *bp)
static int bnxt_init_msix(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
struct msix_entry *msix_ent;
total_vecs = bnxt_get_num_msix(bp);
@@ -9274,9 +10183,10 @@ static int bnxt_init_msix(struct bnxt *bp)
if (rc)
goto msix_setup_exit;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
bp->cp_nr_rings = (min == 1) ?
- max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
} else {
rc = -ENOMEM;
@@ -9336,8 +10246,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
- int tcs = netdev_get_num_tc(bp->dev);
bool irq_cleared = false;
+ int tcs = bp->num_tc;
int rc;
if (!bnxt_need_reserve_rings(bp))
@@ -9363,6 +10273,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
if (bp->tx_nr_rings_xdp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
else
@@ -9439,6 +10350,7 @@ static int bnxt_request_irq(struct bnxt *bp)
if (rc)
break;
+ netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
@@ -9466,6 +10378,11 @@ static void bnxt_del_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
+ for (i = 0; i < bp->rx_nr_rings; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -9486,7 +10403,7 @@ static void bnxt_init_napi(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_USING_MSIX) {
int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
@@ -9542,8 +10459,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
cpr = &bnapi->cp_ring;
bnapi->in_reset = false;
- bnapi->tx_pkts = 0;
-
if (bnapi->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
@@ -9647,7 +10562,10 @@ void bnxt_report_link(struct bnxt *bp)
signal = "(NRZ) ";
break;
case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
- signal = "(PAM4) ";
+ signal = "(PAM4 56Gbps) ";
+ break;
+ case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
+ signal = "(PAM4 112Gbps) ";
break;
default:
break;
@@ -9675,7 +10593,9 @@ static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
if (!resp->supported_speeds_auto_mode &&
!resp->supported_speeds_force_mode &&
!resp->supported_pam4_speeds_auto_mode &&
- !resp->supported_pam4_speeds_force_mode)
+ !resp->supported_pam4_speeds_force_mode &&
+ !resp->supported_speeds2_auto_mode &&
+ !resp->supported_speeds2_force_mode)
return true;
return false;
}
@@ -9721,6 +10641,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
/* Phy re-enabled, reprobe the speeds */
link_info->support_auto_speeds = 0;
link_info->support_pam4_auto_speeds = 0;
+ link_info->support_auto_speeds2 = 0;
}
}
if (resp->supported_speeds_auto_mode)
@@ -9729,6 +10650,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (resp->supported_pam4_speeds_auto_mode)
link_info->support_pam4_auto_speeds =
le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
+ if (resp->supported_speeds2_auto_mode)
+ link_info->support_auto_speeds2 =
+ le16_to_cpu(resp->supported_speeds2_auto_mode);
bp->port_count = resp->port_cnt;
@@ -9746,9 +10670,19 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported)
static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
/* Check if any advertised speeds are no longer supported. The caller
* holds the link_lock mutex, so we can modify link_info settings.
*/
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds2)) {
+ link_info->advertising = link_info->support_auto_speeds2;
+ return true;
+ }
+ return false;
+ }
if (bnxt_support_dropped(link_info->advertising,
link_info->support_auto_speeds)) {
link_info->advertising = link_info->support_auto_speeds;
@@ -9797,18 +10731,25 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->lp_pause = resp->link_partner_adv_pause;
link_info->force_pause_setting = resp->force_pause;
link_info->duplex_setting = resp->duplex_cfg;
- if (link_info->phy_link_status == BNXT_LINK_LINK)
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
link_info->link_speed = le16_to_cpu(resp->link_speed);
- else
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ link_info->active_lanes = resp->active_lanes;
+ } else {
link_info->link_speed = 0;
+ link_info->active_lanes = 0;
+ }
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
link_info->force_pam4_link_speed =
le16_to_cpu(resp->force_pam4_link_speed);
+ link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
+ link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
link_info->auto_pam4_link_speeds =
le16_to_cpu(resp->auto_pam4_link_speed_mask);
+ link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
link_info->lp_auto_link_speeds =
le16_to_cpu(resp->link_partner_adv_speeds);
link_info->lp_auto_pam4_link_speeds =
@@ -9947,7 +10888,11 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
{
if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
- if (bp->link_info.advertising) {
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
+ req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
+ } else if (bp->link_info.advertising) {
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
}
@@ -9961,7 +10906,12 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
} else {
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
- if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
+ netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
+ (u32)bp->link_info.req_link_speed);
+ } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
} else {
@@ -10226,8 +11176,6 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_stop(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
@@ -10627,10 +11575,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
+ bnxt_init_napi(bp);
set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
rc = bnxt_init_nic(bp, true);
if (rc) {
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ bnxt_del_napi(bp);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -10649,6 +11599,7 @@ half_open_err:
void bnxt_half_close_nic(struct bnxt *bp)
{
bnxt_hwrm_resource_free(bp, false, true);
+ bnxt_del_napi(bp);
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
@@ -11112,7 +12063,6 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- struct hwrm_cfa_l2_filter_free_input *req;
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -11124,16 +12074,12 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
if (!uc_update)
goto skip_uc;
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
- if (rc)
- return rc;
- hwrm_req_hold(bp, req);
for (i = 1; i < vnic->uc_filter_count; i++) {
- req->l2_filter_id = vnic->fw_l2_filter_id[i];
+ struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
- rc = hwrm_req_send(bp, req);
+ bnxt_hwrm_l2_filter_free(bp, fltr);
+ bnxt_del_l2_filter(bp, fltr);
}
- hwrm_req_drop(bp, req);
vnic->uc_filter_count = 1;
@@ -11210,7 +12156,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
return true;
return false;
@@ -11220,7 +12166,7 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
return true;
return false;
}
@@ -11228,10 +12174,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
@@ -11241,7 +12186,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
/* RSS contexts not a limiting factor */
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
if (vnics > max_vnics || vnics > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
@@ -11264,9 +12209,6 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
return false;
-#else
- return false;
-#endif
}
static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -11333,7 +12275,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
update_tpa = true;
if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
(flags & BNXT_FLAG_TPA) == 0 ||
- (bp->flags & BNXT_FLAG_CHIP_P5))
+ (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
re_init = true;
}
@@ -11442,9 +12384,10 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
struct udphdr *uh = udp_hdr(skb);
__be16 udp_port = uh->dest;
- if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
+ udp_port != bp->vxlan_gpe_port)
return false;
- if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ if (skb->inner_protocol == htons(ETH_P_TEB)) {
struct ethhdr *eh = inner_eth_hdr(skb);
switch (eh->h_proto) {
@@ -11455,6 +12398,11 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
skb_inner_network_offset(skb),
NULL);
}
+ } else if (skb->inner_protocol == htons(ETH_P_IP)) {
+ return true;
+ } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
}
return false;
}
@@ -11575,15 +12523,13 @@ static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- int i = bnapi->index;
-
- if (!txr)
- return;
+ struct bnxt_tx_ring_info *txr;
+ int i = bnapi->index, j;
- netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
- i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
- txr->tx_cons);
+ bnxt_for_each_napi_tx(j, bnapi, txr)
+ netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+ i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
+ txr->tx_cons);
}
static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
@@ -11746,8 +12692,7 @@ static void bnxt_timer(struct timer_list *t)
if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
- netif_carrier_ok(dev))
+ if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
bnxt_restart_timer:
@@ -11856,8 +12801,6 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
}
static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -12000,7 +12943,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
{
int i;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -12013,12 +12956,11 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
continue;
cpr = &bnapi->cp_ring;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
u32 val[2];
- if (!cpr2 || cpr2->has_more_work ||
- !bnxt_has_work(bp, cpr2))
+ if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
continue;
if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
@@ -12179,36 +13121,42 @@ static void bnxt_sp_task(struct work_struct *work)
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
+static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ int *max_cp);
+
/* Under rtnl_lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
- int max_rx, max_tx, tx_sets = 1;
+ int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
int tx_rings_needed, stats;
int rx_rings = rx;
- int cp, vnics, rc;
+ int cp, vnics;
if (tcs)
tx_sets = tcs;
- rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
- if (rc)
- return rc;
+ _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
- if (max_rx < rx)
+ if (max_rx < rx_rings)
return -ENOMEM;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+
tx_rings_needed = tx * tx_sets + tx_xdp;
if (max_tx < tx_rings_needed)
return -ENOMEM;
vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
- vnics += rx_rings;
+ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
+ BNXT_FLAG_RFS)
+ vnics += rx;
- if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx_rings <<= 1;
- cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+ tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
+ cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < cp)
+ return -ENOMEM;
stats = cp;
if (BNXT_NEW_RM(bp)) {
cp += bnxt_get_ulp_msix_num(bp);
@@ -12283,10 +13231,10 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
{
u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
return true;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
return true;
return false;
@@ -12298,6 +13246,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
+ /* FW may be unresponsive after FLR. FLR must complete within 100 msec
+ * so wait before continuing with recovery.
+ */
+ if (rc)
+ msleep(100);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
rc = bnxt_try_recover_fw(bp);
@@ -12364,15 +13317,15 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
{
- bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bp->rss_hash_delta = bp->rss_hash_cfg;
if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
- bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
@@ -12842,7 +13795,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
- int rc;
+ int rc, tx_cp;
if (tc > bp->max_tc) {
netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
@@ -12850,7 +13803,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
- if (netdev_get_num_tc(dev) == tc)
+ if (bp->num_tc == tc)
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -12868,13 +13821,16 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (tc) {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
netdev_set_num_tc(dev, tc);
+ bp->num_tc = tc;
} else {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
+ bp->num_tc = 0;
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
if (netif_running(bp->dev))
return bnxt_open_nic(bp, true, false);
@@ -12924,38 +13880,102 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-#ifdef CONFIG_RFS_ACCEL
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+ const struct sk_buff *skb)
+{
+ struct bnxt_vnic_info *vnic;
+
+ if (skb)
+ return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+
+ vnic = &bp->vnic_info[0];
+ return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
+}
+
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+ u32 idx)
+{
+ struct hlist_head *head;
+ int bit_id;
+
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+ if (bit_id < 0) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return -ENOMEM;
+ }
+
+ fltr->base.sw_id = (u16)bit_id;
+ fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
+ fltr->base.flags |= BNXT_ACT_RING_DST;
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bp->ntp_fltr_count++;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return 0;
+}
+
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
+ if (f1->ntuple_flags != f2->ntuple_flags)
+ return false;
+
if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto)
return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
- if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
+ keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
return false;
} else {
- if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src)) ||
- memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst)))
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
+ memcmp(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src,
+ sizeof(keys1->addrs.v6addrs.src))) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
+ memcmp(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst,
+ sizeof(keys1->addrs.v6addrs.dst))))
return false;
}
- if (keys1->ports.ports == keys2->ports.ports &&
- keys1->control.flags == keys2->control.flags &&
- ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
- ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
+ keys1->ports.src != keys2->ports.src) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
+ keys1->ports.dst != keys2->ports.dst))
+ return false;
+
+ if (keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr)
return true;
return false;
}
+struct bnxt_ntuple_filter *
+bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr, u32 idx)
+{
+ struct bnxt_ntuple_filter *f;
+ struct hlist_head *head;
+
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(f, head, base.hash) {
+ if (bnxt_fltr_match(f, fltr))
+ return f;
+ }
+ return NULL;
+}
+
+#ifdef CONFIG_RFS_ACCEL
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
@@ -12963,29 +13983,31 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct bnxt_ntuple_filter *fltr, *new_fltr;
struct flow_keys *fkeys;
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
- int rc = 0, idx, bit_id, l2_idx = 0;
- struct hlist_head *head;
+ struct bnxt_l2_filter *l2_fltr;
+ int rc = 0, idx;
u32 flags;
- if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- int off = 0, j;
+ if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
+ l2_fltr = bp->vnic_info[0].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ } else {
+ struct bnxt_l2_key key;
- netif_addr_lock_bh(dev);
- for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
- if (ether_addr_equal(eth->h_dest,
- vnic->uc_list + off)) {
- l2_idx = j + 1;
- break;
- }
- }
- netif_addr_unlock_bh(dev);
- if (!l2_idx)
+ ether_addr_copy(key.dst_mac_addr, eth->h_dest);
+ key.vlan = 0;
+ l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
+ if (!l2_fltr)
return -EINVAL;
+ if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return -EINVAL;
+ }
}
new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
- if (!new_fltr)
+ if (!new_fltr) {
+ bnxt_del_l2_filter(bp, l2_fltr);
return -ENOMEM;
+ }
fkeys = &new_fltr->fkeys;
if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
@@ -13012,49 +14034,52 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
- memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
- memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+ new_fltr->l2_fltr = l2_fltr;
+ new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
- idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- head = &bp->ntp_fltr_hash_tbl[idx];
+ idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
- hlist_for_each_entry_rcu(fltr, head, hash) {
- if (bnxt_fltr_match(fltr, new_fltr)) {
- rc = fltr->sw_id;
- rcu_read_unlock();
- goto err_free;
- }
- }
- rcu_read_unlock();
-
- spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_NTP_FLTR_MAX_FLTR, 0);
- if (bit_id < 0) {
- spin_unlock_bh(&bp->ntp_fltr_lock);
- rc = -ENOMEM;
+ fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+ if (fltr) {
+ rc = fltr->base.sw_id;
+ rcu_read_unlock();
goto err_free;
}
+ rcu_read_unlock();
- new_fltr->sw_id = (u16)bit_id;
new_fltr->flow_id = flow_id;
- new_fltr->l2_fltr_idx = l2_idx;
- new_fltr->rxq = rxq_index;
- hlist_add_head_rcu(&new_fltr->hash, head);
- bp->ntp_fltr_count++;
- spin_unlock_bh(&bp->ntp_fltr_lock);
-
- bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
-
- return new_fltr->sw_id;
+ new_fltr->base.rxq = rxq_index;
+ rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+ if (!rc) {
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
+ return new_fltr->base.sw_id;
+ }
err_free:
+ bnxt_del_l2_filter(bp, l2_fltr);
kfree(new_fltr);
return rc;
}
+#endif
+
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
+{
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return;
+ }
+ hlist_del_rcu(&fltr->base.hash);
+ bp->ntp_fltr_count--;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr->l2_fltr);
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ kfree_rcu(fltr, base.rcu);
+}
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
+#ifdef CONFIG_RFS_ACCEL
int i;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
@@ -13064,13 +14089,15 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
int rc;
head = &bp->ntp_fltr_hash_tbl[i];
- hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bool del = false;
- if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
- if (rps_may_expire_flow(bp->dev, fltr->rxq,
+ if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
+ if (fltr->base.flags & BNXT_ACT_NO_AGING)
+ continue;
+ if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
fltr->flow_id,
- fltr->sw_id)) {
+ fltr->base.sw_id)) {
bnxt_hwrm_cfa_ntuple_filter_free(bp,
fltr);
del = true;
@@ -13081,30 +14108,16 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
if (rc)
del = true;
else
- set_bit(BNXT_FLTR_VALID, &fltr->state);
+ set_bit(BNXT_FLTR_VALID, &fltr->base.state);
}
- if (del) {
- spin_lock_bh(&bp->ntp_fltr_lock);
- hlist_del_rcu(&fltr->hash);
- bp->ntp_fltr_count--;
- spin_unlock_bh(&bp->ntp_fltr_lock);
- synchronize_rcu();
- clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
- kfree(fltr);
- }
+ if (del)
+ bnxt_del_ntp_filter(bp, fltr);
}
}
+#endif
}
-#else
-
-static void bnxt_cfg_ntp_filters(struct bnxt *bp)
-{
-}
-
-#endif /* CONFIG_RFS_ACCEL */
-
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
@@ -13112,9 +14125,11 @@ static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int tabl
unsigned int cmd;
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
- cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
else
- cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}
@@ -13127,8 +14142,10 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
- else
+ else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
@@ -13142,6 +14159,16 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
+}, bnxt_udp_tunnels_p7 = {
+ .set_port = bnxt_udp_tunnel_set_port,
+ .unset_port = bnxt_udp_tunnel_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
+ },
};
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -13249,6 +14276,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_ptp_clear(bp);
unregister_netdev(dev);
+ bnxt_free_l2_filters(bp, true);
+ bnxt_free_ntp_fltrs(bp, true);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -13271,8 +14300,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -13341,7 +14368,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
bnxt_get_ulp_msix_num(bp),
hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -13350,8 +14377,14 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
}
if (bp->flags & BNXT_FLAG_AGG_RINGS)
*max_rx >>= 1;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rc;
+
+ rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
/* On P5 chips, max_cp output param should be available NQs */
*max_cp = max_irq;
}
@@ -13652,7 +14685,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
max_irqs = bnxt_get_max_irq(pdev);
- dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+ dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
+ max_irqs);
if (!dev)
return -ENOMEM;
@@ -13694,10 +14728,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
bnxt_vpd_read_info(bp);
- if (BNXT_CHIP_P5(bp)) {
- bp->flags |= BNXT_FLAG_CHIP_P5;
- if (BNXT_CHIP_SR2(bp))
- bp->flags |= BNXT_FLAG_CHIP_SR2;
+ if (BNXT_CHIP_P5_PLUS(bp)) {
+ bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
+ if (BNXT_CHIP_P7(bp))
+ bp->flags |= BNXT_FLAG_CHIP_P7;
}
rc = bnxt_alloc_rss_indir_tbl(bp);
@@ -13722,6 +14756,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_GRO;
+ if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+ dev->hw_features |= NETIF_F_GSO_UDP_L4;
if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_LRO;
@@ -13732,7 +14768,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
- dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+ if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+ dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
+ else
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
@@ -13760,7 +14801,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4(bp))
bp->gro_func = bnxt_gro_func_5731x;
- else if (BNXT_CHIP_P5(bp))
+ else if (BNXT_CHIP_P5_PLUS(bp))
bp->gro_func = bnxt_gro_func_5750x;
}
if (!BNXT_CHIP_P4_PLUS(bp))
@@ -13786,6 +14827,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ bnxt_init_l2_fltr_tbl(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
@@ -13868,8 +14910,6 @@ init_err_pci_clean:
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -13922,8 +14962,6 @@ static int bnxt_suspend(struct device *device)
bnxt_hwrm_func_drv_unrgtr(bp);
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
rtnl_unlock();
return rc;
}
@@ -14022,8 +15060,6 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
rtnl_unlock();
/* Request a slot reset. */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a7d7b09ea162..47338b48ca20 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,7 +18,7 @@
*/
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 2
+#define DRV_VER_UPD 3
#include <linux/ethtool.h>
#include <linux/interrupt.h>
@@ -61,6 +61,24 @@ struct tx_bd {
__le64 tx_bd_haddr;
} __packed;
+#define TX_OPAQUE_IDX_MASK 0x0000ffff
+#define TX_OPAQUE_BDS_MASK 0x00ff0000
+#define TX_OPAQUE_BDS_SHIFT 16
+#define TX_OPAQUE_RING_MASK 0xff000000
+#define TX_OPAQUE_RING_SHIFT 24
+
+#define SET_TX_OPAQUE(bp, txr, idx, bds) \
+ (((txr)->tx_napi_idx << TX_OPAQUE_RING_SHIFT) | \
+ ((bds) << TX_OPAQUE_BDS_SHIFT) | ((idx) & (bp)->tx_ring_mask))
+
+#define TX_OPAQUE_IDX(opq) ((opq) & TX_OPAQUE_IDX_MASK)
+#define TX_OPAQUE_RING(opq) (((opq) & TX_OPAQUE_RING_MASK) >> \
+ TX_OPAQUE_RING_SHIFT)
+#define TX_OPAQUE_BDS(opq) (((opq) & TX_OPAQUE_BDS_MASK) >> \
+ TX_OPAQUE_BDS_SHIFT)
+#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
+ (bp)->tx_ring_mask)
+
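
The opaque value in each TX BD now packs three fields: the per-NAPI TX ring index in bits 31:24, the BD count of the packet in bits 23:16 and the producer index in the low 16 bits, so the completion path can locate both the ring and the next BD. A round-trip sketch of that packing, assuming a 0xffff tx_ring_mask purely for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define IDX_MASK        0x0000ffffu
    #define BDS_SHIFT       16
    #define BDS_MASK        0x00ff0000u
    #define RING_SHIFT      24
    #define RING_MASK       0xff000000u

    int main(void)
    {
            uint32_t ring_mask = 0xffff;            /* assumed tx_ring_mask */
            uint32_t opq = (3u << RING_SHIFT) |     /* tx_napi_idx 3 */
                           (5u << BDS_SHIFT) |      /* 5 BDs in this packet */
                           (0x1234u & ring_mask);   /* producer index */

            assert(((opq & RING_MASK) >> RING_SHIFT) == 3);
            assert(((opq & BDS_MASK) >> BDS_SHIFT) == 5);
            assert((opq & IDX_MASK) == 0x1234);
            /* TX_OPAQUE_PROD(): first BD index after this packet */
            assert(((opq & IDX_MASK) + 5) == 0x1239);
            return 0;
    }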
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
@@ -121,11 +139,15 @@ struct tx_cmp {
__le32 tx_cmp_flags_type;
#define CMP_TYPE (0x3f << 0)
#define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
#define CMP_TYPE_RX_L2_CMP 17
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
#define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -152,9 +174,13 @@ struct tx_cmp {
#define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
#define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
- __le32 tx_cmp_unsed_3;
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
};
+#define TX_CMP_SQ_CONS_IDX(txcmp) \
+ (le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
@@ -182,8 +208,20 @@ struct rx_cmp {
#define RX_CMP_AGG_BUFS_SHIFT 1
#define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
#define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_CMP_V3_RSS_EXT_OP_LEGACY (0xf << 12)
+ #define RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT 12
+ #define RX_CMP_V3_RSS_EXT_OP_NEW (0xf << 8)
+ #define RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT 8
#define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
#define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+ #define RX_CMP_SUB_NS_TS (0xf << 16)
+ #define RX_CMP_SUB_NS_TS_SHIFT 16
+ #define RX_CMP_METADATA1 (0xf << 28)
+ #define RX_CMP_METADATA1_SHIFT 28
+ #define RX_CMP_METADATA1_TPID_SEL (0x7 << 28)
+ #define RX_CMP_METADATA1_TPID_8021Q (0x1 << 28)
+ #define RX_CMP_METADATA1_TPID_8021AD (0x0 << 28)
+ #define RX_CMP_METADATA1_VALID (0x8 << 28)
__le32 rx_cmp_rss_hash;
};
@@ -203,6 +241,30 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
+ RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE_NEW(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_NEW) >>\
+ RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE(bp, rxcmp) \
+ (((bp)->rss_cap & BNXT_RSS_CAP_RSS_TCAM) ? \
+ RX_CMP_V3_HASH_TYPE_NEW(rxcmp) : \
+ RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp))
+
+#define EXT_OP_INNER_4 0x0
+#define EXT_OP_OUTER_4 0x2
+#define EXT_OP_INNFL_3 0x8
+#define EXT_OP_OUTFL_3 0xa
+
+#define RX_CMP_VLAN_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_misc_v1 & cpu_to_le32(RX_CMP_METADATA1_VALID))
+
+#define RX_CMP_VLAN_TPID_SEL(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_METADATA1_TPID_SEL)
+
struct rx_cmp_ext {
__le32 rx_cmp_flags2;
#define RX_CMP_FLAGS2_IP_CS_CALC 0x1
@@ -250,6 +312,9 @@ struct rx_cmp_ext {
#define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
#define RX_CMPL_CFA_CODE_SFT 16
+ #define RX_CMPL_METADATA0_TCI_MASK (0xffff << 16)
+ #define RX_CMPL_METADATA0_VID_MASK (0x0fff << 16)
+ #define RX_CMPL_METADATA0_SFT 16
__le32 rx_cmp_timestamp;
};
@@ -275,6 +340,10 @@ struct rx_cmp_ext {
((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \
RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
+#define RX_CMP_METADATA0_TCI(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_METADATA0_TCI_MASK) >> RX_CMPL_METADATA0_SFT)
+
struct rx_agg_cmp {
__le32 rx_agg_cmp_len_flags_type;
#define RX_AGG_CMP_TYPE (0x3f << 0)
@@ -317,10 +386,18 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V1 (0x1 << 0)
#define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE (0x1ff << 7)
+ #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
#define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
+ #define RX_TPA_START_CMP_METADATA1 (0xf << 28)
+ #define RX_TPA_START_CMP_METADATA1_SHIFT 28
+ #define RX_TPA_START_METADATA1_TPID_SEL (0x7 << 28)
+ #define RX_TPA_START_METADATA1_TPID_8021Q (0x1 << 28)
+ #define RX_TPA_START_METADATA1_TPID_8021AD (0x0 << 28)
+ #define RX_TPA_START_METADATA1_VALID (0x8 << 28)
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -334,6 +411,11 @@ struct rx_tpa_start_cmp {
RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define TPA_START_V3_HASH_TYPE(rx_tpa_start) \
+ (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
#define TPA_START_AGG_ID(rx_tpa_start) \
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
@@ -346,6 +428,14 @@ struct rx_tpa_start_cmp {
((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+#define TPA_START_VLAN_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 & \
+ cpu_to_le32(RX_TPA_START_METADATA1_VALID))
+
+#define TPA_START_VLAN_TPID_SEL(rx_tpa_start) \
+ (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_METADATA1_TPID_SEL)
+
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -356,6 +446,8 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+ #define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE (0x1 << 10)
+ #define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
@@ -369,6 +461,9 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+ #define RX_TPA_START_CMP_METADATA0_TCI_MASK (0xffff << 16)
+ #define RX_TPA_START_CMP_METADATA0_VID_MASK (0x0fff << 16)
+ #define RX_TPA_START_CMP_METADATA0_SFT 16
__le32 rx_tpa_start_cmp_hdr_info;
};
@@ -385,6 +480,11 @@ struct rx_tpa_start_cmp_ext {
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+#define TPA_START_METADATA0_TCI(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_METADATA0_TCI_MASK) >> \
+ RX_TPA_START_CMP_METADATA0_SFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -529,6 +629,8 @@ struct nqe_cn {
#define NQ_CN_TYPE_SFT 0
#define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
#define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ #define NQ_CN_TOGGLE_MASK 0xc0UL
+ #define NQ_CN_TOGGLE_SFT 6
__le16 reserved16;
__le32 cq_handle_low;
__le32 v;
@@ -536,6 +638,23 @@ struct nqe_cn {
__le32 cq_handle_high;
};
+#define BNXT_NQ_HDL_IDX_MASK 0x00ffffff
+#define BNXT_NQ_HDL_TYPE_MASK 0xff000000
+#define BNXT_NQ_HDL_TYPE_SHIFT 24
+#define BNXT_NQ_HDL_TYPE_RX 0x00
+#define BNXT_NQ_HDL_TYPE_TX 0x01
+
+#define BNXT_NQ_HDL_IDX(hdl) ((hdl) & BNXT_NQ_HDL_IDX_MASK)
+#define BNXT_NQ_HDL_TYPE(hdl) (((hdl) & BNXT_NQ_HDL_TYPE_MASK) >> \
+ BNXT_NQ_HDL_TYPE_SHIFT)
+
+#define BNXT_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNXT_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+#define NQE_CN_TYPE(type) ((type) & NQ_CN_TYPE_MASK)
+#define NQE_CN_TOGGLE(type) (((type) & NQ_CN_TOGGLE_MASK) >> \
+ NQ_CN_TOGGLE_SFT)
+
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
@@ -551,9 +670,14 @@ struct nqe_cn {
/* 64-bit doorbell */
#define DBR_INDEX_MASK 0x0000000000ffffffULL
+#define DBR_EPOCH_MASK 0x01000000UL
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_MASK 0x06000000UL
+#define DBR_TOGGLE_SFT 25
#define DBR_XID_MASK 0x000fffff00000000ULL
#define DBR_XID_SFT 32
#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
#define DBR_TYPE_SQ (0x0ULL << 60)
#define DBR_TYPE_RQ (0x1ULL << 60)
#define DBR_TYPE_SRQ (0x2ULL << 60)
@@ -566,6 +690,7 @@ struct nqe_cn {
#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
#define DBR_TYPE_NQ (0xaULL << 60)
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
#define DB_PF_OFFSET_P5 0x10000
@@ -661,10 +786,12 @@ struct nqe_cn {
*/
#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
-#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_RING(bp, x) (((x) & (bp)->rx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bp, x) (((x) & (bp)->rx_agg_ring_mask) >> \
+ (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
-#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_RING(bp, x) (((x) & (bp)->tx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
@@ -691,11 +818,14 @@ struct nqe_cn {
#define RX_CMP_TYPE(rxcmp) \
(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
-#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
+#define RING_RX(bp, idx) ((idx) & (bp)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
-#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
+#define RING_RX_AGG(bp, idx) ((idx) & (bp)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
-#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+#define RING_TX(bp, idx) ((idx) & (bp)->tx_ring_mask)
+#define NEXT_TX(idx) ((idx) + 1)
#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
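
A minimal standalone sketch (not part of the patch) of why NEXT_RX()/NEXT_TX() no longer mask: the raw producer/consumer index now grows monotonically and the new RING_RX()/RING_TX() macros apply the ring mask only at the point of use, leaving the unmasked value available for the doorbell epoch logic. The 16-entry ring below is a demo value, not a driver constant.

/* Standalone sketch: raw index counts up forever, the slot is derived
 * only when the ring entry is touched.
 */
#include <stdio.h>
#include <stdint.h>

#define RING_MASK       15                      /* 16-entry demo ring */
#define RING_RX(idx)    ((idx) & RING_MASK)     /* slot within the ring */
#define NEXT_RX(idx)    ((idx) + 1)             /* raw index is unbounded */

int main(void)
{
        uint32_t prod = 14;
        int i;

        for (i = 0; i < 4; i++) {
                printf("raw %u -> slot %u\n", prod, RING_RX(prod));
                prod = NEXT_RX(prod);
        }
        return 0;       /* raw 14..17 map to slots 14, 15, 0, 1 */
}
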
@@ -708,6 +838,7 @@ struct nqe_cn {
#define BNXT_AGG_EVENT 2
#define BNXT_TX_EVENT 4
#define BNXT_REDIRECT_EVENT 8
+#define BNXT_TX_CMP_EVENT 0x10
struct bnxt_sw_tx_bd {
union {
@@ -736,13 +867,6 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
-struct bnxt_mem_init {
- u8 init_val;
- u16 offset;
-#define BNXT_MEM_INVALID_OFFSET 0xffff
- u16 size;
-};
-
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -752,7 +876,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- struct bnxt_mem_init *mem_init;
+ struct bnxt_ctx_mem_type *ctx_mem;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -794,13 +918,27 @@ struct bnxt_db_info {
u64 db_key64;
u32 db_key32;
};
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
};
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+
+#define DB_TOGGLE(tgl) ((tgl) << DBR_TOGGLE_SFT)
+
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
+ struct bnxt_cp_ring_info *tx_cpr;
u16 tx_prod;
u16 tx_cons;
+ u16 tx_hw_cons;
u16 txq_index;
+ u8 tx_napi_idx;
u8 kick_pending;
struct bnxt_db_info tx_db;
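
A minimal standalone sketch (not part of the patch) of how DB_RING_IDX() could fold the epoch bit into the doorbell value, assuming db_epoch_mask is the single bit just above db_ring_mask and db_epoch_shift moves that bit to DBR_EPOCH_SFT; the 256-entry ring and the struct below are demo assumptions, not values taken from the driver or hardware.

#include <stdio.h>
#include <stdint.h>

#define DBR_EPOCH_SFT   24

struct demo_db {                        /* mock of the new bnxt_db_info fields */
        uint32_t db_ring_mask;          /* ring size - 1 */
        uint32_t db_epoch_mask;         /* assumed: bit just above the ring mask */
        uint8_t  db_epoch_shift;        /* moves that bit to DBR_EPOCH_SFT */
};

#define DB_EPOCH(db, idx)    (((idx) & (db)->db_epoch_mask) << (db)->db_epoch_shift)
#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | DB_EPOCH(db, idx))

int main(void)
{
        struct demo_db db = {
                .db_ring_mask = 0xff,                   /* 256-entry demo ring */
                .db_epoch_mask = 0x100,
                .db_epoch_shift = DBR_EPOCH_SFT - 8,    /* bit 8 -> bit 24 */
        };
        uint32_t idx;

        /* epoch bit (bit 24) flips each time the raw index wraps the ring */
        for (idx = 254; idx < 259; idx++)
                printf("raw %3u -> doorbell idx 0x%08x\n", idx, DB_RING_IDX(&db, idx));
        return 0;
}
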
@@ -895,6 +1033,8 @@ struct bnxt_tpa_info {
u16 cfa_code; /* cfa_code in TPA start compl */
u8 agg_count;
+ u8 vlan_valid:1;
+ u8 cfa_code_valid:1;
struct rx_agg_cmp *agg_arr;
};
@@ -907,6 +1047,7 @@ struct bnxt_tpa_idx_map {
struct bnxt_rx_ring_info {
struct bnxt_napi *bnapi;
+ struct bnxt_cp_ring_info *rx_cpr;
u16 rx_prod;
u16 rx_agg_prod;
u16 rx_sw_agg_prod;
@@ -986,6 +1127,11 @@ struct bnxt_cp_ring_info {
u8 had_work_done:1;
u8 has_more_work:1;
+ u8 had_nqe_notify:1;
+ u8 toggle;
+
+ u8 cp_ring_type;
+ u8 cp_idx;
u32 last_cp_raw_cons;
@@ -1010,11 +1156,18 @@ struct bnxt_cp_ring_info {
struct bnxt_ring_struct cp_ring_struct;
- struct bnxt_cp_ring_info *cp_ring_arr[2];
-#define BNXT_RX_HDL 0
-#define BNXT_TX_HDL 1
+ int cp_ring_count;
+ struct bnxt_cp_ring_info *cp_ring_arr;
};
+#define BNXT_MAX_QUEUE 8
+#define BNXT_MAX_TXR_PER_NAPI BNXT_MAX_QUEUE
+
+#define bnxt_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNXT_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
struct bnxt_napi {
struct napi_struct napi;
struct bnxt *bp;
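
A minimal standalone sketch (not part of the patch) of how bnxt_for_each_napi_tx() walks the per-NAPI TX ring array introduced here, stopping at the first NULL slot or after BNXT_MAX_TXR_PER_NAPI entries; mock_napi and mock_txr are stand-ins for the real driver structures.

#include <stdio.h>
#include <stddef.h>

#define BNXT_MAX_TXR_PER_NAPI   8

struct mock_txr { int id; };
struct mock_napi { struct mock_txr *tx_ring[BNXT_MAX_TXR_PER_NAPI]; };

#define bnxt_for_each_napi_tx(iter, bnapi, txr)                         \
        for (iter = 0, txr = (bnapi)->tx_ring[0]; txr;                  \
             txr = (iter < BNXT_MAX_TXR_PER_NAPI - 1) ?                 \
                   (bnapi)->tx_ring[++iter] : NULL)

int main(void)
{
        struct mock_txr a = { 0 }, b = { 1 }, c = { 2 };
        struct mock_napi napi = { .tx_ring = { &a, &b, &c } };  /* rest stay NULL */
        struct mock_txr *txr;
        int i;

        bnxt_for_each_napi_tx(i, &napi, txr)
                printf("tx ring %d\n", txr->id);        /* prints 0, 1, 2 */
        return 0;
}
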
@@ -1022,11 +1175,10 @@ struct bnxt_napi {
int index;
struct bnxt_cp_ring_info cp_ring;
struct bnxt_rx_ring_info *rx_ring;
- struct bnxt_tx_ring_info *tx_ring;
+ struct bnxt_tx_ring_info *tx_ring[BNXT_MAX_TXR_PER_NAPI];
void (*tx_int)(struct bnxt *, struct bnxt_napi *,
int budget);
- int tx_pkts;
u8 events;
u8 tx_fault:1;
@@ -1067,7 +1219,7 @@ struct bnxt_vnic_info {
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
#define BNXT_MAX_UC_ADDRS 4
- __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+ struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
u16 uc_filter_count;
u8 *uc_list;
@@ -1180,19 +1332,71 @@ struct bnxt_pf_info {
struct bnxt_vf_info *vf;
};
-struct bnxt_ntuple_filter {
+struct bnxt_filter_base {
struct hlist_node hash;
- u8 dst_mac_addr[ETH_ALEN];
- u8 src_mac_addr[ETH_ALEN];
- struct flow_keys fkeys;
__le64 filter_id;
+ u8 type;
+#define BNXT_FLTR_TYPE_NTUPLE 1
+#define BNXT_FLTR_TYPE_L2 2
+ u8 flags;
+#define BNXT_ACT_DROP 1
+#define BNXT_ACT_RING_DST 2
+#define BNXT_ACT_FUNC_DST 4
+#define BNXT_ACT_NO_AGING 8
u16 sw_id;
- u8 l2_fltr_idx;
u16 rxq;
- u32 flow_id;
+ u16 fw_vnic_id;
+ u16 vf_idx;
unsigned long state;
#define BNXT_FLTR_VALID 0
-#define BNXT_FLTR_UPDATE 1
+#define BNXT_FLTR_INSERTED 1
+#define BNXT_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnxt_ntuple_filter {
+ struct bnxt_filter_base base;
+ struct flow_keys fkeys;
+ struct bnxt_l2_filter *l2_fltr;
+ u32 ntuple_flags;
+#define BNXT_NTUPLE_MATCH_SRC_IP 1
+#define BNXT_NTUPLE_MATCH_DST_IP 2
+#define BNXT_NTUPLE_MATCH_SRC_PORT 4
+#define BNXT_NTUPLE_MATCH_DST_PORT 8
+#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \
+ BNXT_NTUPLE_MATCH_DST_IP | \
+ BNXT_NTUPLE_MATCH_SRC_PORT | \
+ BNXT_NTUPLE_MATCH_DST_PORT)
+ u32 flow_id;
+};
+
+struct bnxt_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+struct bnxt_ipv4_tuple {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ports ports;
+};
+
+struct bnxt_ipv6_tuple {
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_ports ports;
+};
+
+#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
+
+struct bnxt_l2_filter {
+ struct bnxt_filter_base base;
+ struct bnxt_l2_key l2_key;
+ atomic_t refcnt;
};
struct bnxt_link_info {
@@ -1214,6 +1418,7 @@ struct bnxt_link_info {
#define BNXT_LINK_STATE_DOWN 1
#define BNXT_LINK_STATE_UP 2
#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
+ u8 active_lanes;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
@@ -1248,8 +1453,11 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
+#define BNXT_LINK_SPEED_400GB PORT_PHY_QCFG_RESP_LINK_SPEED_400GB
u16 support_speeds;
u16 support_pam4_speeds;
+ u16 support_speeds2;
+
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
@@ -1265,12 +1473,52 @@ struct bnxt_link_info {
#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G
#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G
#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G
+ u16 auto_link_speeds2;
+#define BNXT_LINK_SPEEDS2_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB
+#define BNXT_LINK_SPEEDS2_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB
+#define BNXT_LINK_SPEEDS2_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB
+#define BNXT_LINK_SPEEDS2_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB
+#define BNXT_LINK_SPEEDS2_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB
+#define BNXT_LINK_SPEEDS2_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB
+#define BNXT_LINK_SPEEDS2_MSK_50GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112
+#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112
+#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112
+
u16 support_auto_speeds;
u16 support_pam4_auto_speeds;
+ u16 support_auto_speeds2;
+
u16 lp_auto_link_speeds;
u16 lp_auto_pam4_link_speeds;
u16 force_link_speed;
u16 force_pam4_link_speed;
+ u16 force_link_speed2;
+#define BNXT_LINK_SPEED_50GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56
+#define BNXT_LINK_SPEED_100GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56
+#define BNXT_LINK_SPEED_200GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56
+#define BNXT_LINK_SPEED_400GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56
+#define BNXT_LINK_SPEED_100GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112
+#define BNXT_LINK_SPEED_200GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112
+#define BNXT_LINK_SPEED_400GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+
u32 preemphasis;
u8 module_status;
u8 active_fec_sig_mode;
@@ -1301,6 +1549,7 @@ struct bnxt_link_info {
u8 req_signal_mode;
#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+#define BNXT_SIG_MODE_PAM4_112 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
#define BNXT_SIG_MODE_MAX (PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST + 1)
u8 req_duplex;
u8 req_flow_ctrl;
@@ -1361,8 +1610,6 @@ struct bnxt_link_info {
(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
BNXT_FEC_RS_OFF(link_info))
-#define BNXT_MAX_QUEUE 8
-
struct bnxt_queue_info {
u8 queue_id;
u8 queue_profile;
@@ -1391,7 +1638,7 @@ struct bnxt_test_info {
};
#define CHIMP_REG_VIEW_ADDR \
- ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+ ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ? 0x80000000 : 0xb1000000)
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
@@ -1515,53 +1762,73 @@ do { \
attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
} while (0)
+struct bnxt_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNXT_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 split_entry_cnt;
+#define BNXT_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNXT_MAX_SPLIT_ENTRY];
+ };
+ struct bnxt_ctx_pg_info *pg_info;
+};
+
+#define BNXT_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNXT_CTX_QP FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNXT_CTX_SRQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNXT_CTX_CQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNXT_CTX_VNIC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNXT_CTX_STAT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNXT_CTX_STQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNXT_CTX_TKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
+#define BNXT_CTX_RKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNXT_CTX_QTKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
+#define BNXT_CTX_QRKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
+#define BNXT_CTX_TBLSC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
+#define BNXT_CTX_XPAR FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+
+#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
+#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_INV ((u16)-1)
+
struct bnxt_ctx_mem_info {
- u32 qp_max_entries;
- u16 qp_min_qp1_entries;
- u16 qp_max_l2_entries;
- u16 qp_entry_size;
- u16 srq_max_l2_entries;
- u32 srq_max_entries;
- u16 srq_entry_size;
- u16 cq_max_l2_entries;
- u32 cq_max_entries;
- u16 cq_entry_size;
- u16 vnic_max_vnic_entries;
- u16 vnic_max_ring_table_entries;
- u16 vnic_entry_size;
- u32 stat_max_entries;
- u16 stat_entry_size;
- u16 tqm_entry_size;
- u32 tqm_min_entries_per_ring;
- u32 tqm_max_entries_per_ring;
- u32 mrav_max_entries;
- u16 mrav_entry_size;
- u16 tim_entry_size;
- u32 tim_max_entries;
- u16 mrav_num_entries_units;
- u8 tqm_entries_multiple;
u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
-
- struct bnxt_ctx_pg_info qp_mem;
- struct bnxt_ctx_pg_info srq_mem;
- struct bnxt_ctx_pg_info cq_mem;
- struct bnxt_ctx_pg_info vnic_mem;
- struct bnxt_ctx_pg_info stat_mem;
- struct bnxt_ctx_pg_info mrav_mem;
- struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
-
-#define BNXT_CTX_MEM_INIT_QP 0
-#define BNXT_CTX_MEM_INIT_SRQ 1
-#define BNXT_CTX_MEM_INIT_CQ 2
-#define BNXT_CTX_MEM_INIT_VNIC 3
-#define BNXT_CTX_MEM_INIT_STAT 4
-#define BNXT_CTX_MEM_INIT_MRAV 5
-#define BNXT_CTX_MEM_INIT_MAX 6
- struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
+ struct bnxt_ctx_mem_type ctx_arr[BNXT_CTX_V2_MAX];
};
enum bnxt_health_severity {
@@ -1697,6 +1964,10 @@ enum board_idx {
BCM57508_NPAR,
BCM57504_NPAR,
BCM57502_NPAR,
+ BCM57608,
+ BCM57604,
+ BCM57602,
+ BCM57601,
BCM58802,
BCM58804,
BCM58808,
@@ -1744,14 +2015,14 @@ struct bnxt {
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
+#define CHIP_NUM_57608 0x1760
+
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
u8 chip_rev;
-#define CHIP_NUM_58818 0xd818
-
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1801,7 +2072,7 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
- #define BNXT_FLAG_CHIP_P5 0x1
+ #define BNXT_FLAG_CHIP_P5_PLUS 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1820,8 +2091,6 @@ struct bnxt {
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
- #define BNXT_FLAG_UDP_RSS_CAP 0x800
- #define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
@@ -1829,13 +2098,15 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
- #define BNXT_FLAG_CHIP_SR2 0x80000
+ #define BNXT_FLAG_CHIP_P7 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
+ #define BNXT_FLAG_UDP_GSO_CAP 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
+ #define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1855,21 +2126,21 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+ (!((bp)->flags & BNXT_FLAG_CHIP_P5_PLUS) ||\
(bp)->max_tpa_v2) && !is_kdump_kernel())
#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
-#define BNXT_CHIP_SR2(bp) \
- ((bp)->chip_num == CHIP_NUM_58818)
+#define BNXT_CHIP_P7(bp) \
+ ((bp)->chip_num == CHIP_NUM_57608)
-#define BNXT_CHIP_P5_THOR(bp) \
+#define BNXT_CHIP_P5(bp) \
((bp)->chip_num == CHIP_NUM_57508 || \
(bp)->chip_num == CHIP_NUM_57504 || \
(bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 5 */
-#define BNXT_CHIP_P5(bp) \
- (BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
+#define BNXT_CHIP_P5_PLUS(bp) \
+ (BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp))
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1880,7 +2151,7 @@ struct bnxt {
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
#define BNXT_CHIP_P4_PLUS(bp) \
- (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
@@ -1941,6 +2212,11 @@ struct bnxt {
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u32 rss_hash_delta;
+ u32 rss_cap;
+#define BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA BIT(0)
+#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
+#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
+#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
u16 max_mtu;
u8 max_tc;
@@ -1949,6 +2225,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 num_tc;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2006,7 +2283,6 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
- #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA BIT_ULL(19)
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
#define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
#define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
@@ -2023,6 +2299,8 @@ struct bnxt {
#define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(33)
#define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(34)
#define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
+ #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
+ #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
u32 fw_dbg_cap;
@@ -2067,8 +2345,10 @@ struct bnxt {
u16 vxlan_fw_dst_port_id;
u16 nge_fw_dst_port_id;
+ u16 vxlan_gpe_fw_dst_port_id;
__be16 vxlan_port;
__be16 nge_port;
+ __be16 vxlan_gpe_port;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
@@ -2136,9 +2416,11 @@ struct bnxt {
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_offset; /* db_offset within db_size */
int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_MAX_FLTR (BNXT_NTP_FLTR_MAX_FLTR + BNXT_L2_FLTR_MAX_FLTR)
#define BNXT_NTP_FLTR_HASH_SIZE 512
#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
@@ -2147,6 +2429,14 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+#define BNXT_L2_FLTR_MAX_FLTR 1024
+#define BNXT_L2_FLTR_HASH_SIZE 32
+#define BNXT_L2_FLTR_HASH_MASK (BNXT_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNXT_L2_FLTR_HASH_SIZE];
+
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
@@ -2169,6 +2459,7 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
+#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2212,15 +2503,15 @@ struct bnxt {
#define BNXT_NUM_TX_RING_STATS 8
#define BNXT_NUM_TPA_RING_STATS 4
#define BNXT_NUM_TPA_RING_STATS_P5 5
-#define BNXT_NUM_TPA_RING_STATS_P5_SR2 6
+#define BNXT_NUM_TPA_RING_STATS_P7 6
#define BNXT_RING_STATS_SIZE_P5 \
((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
BNXT_NUM_TPA_RING_STATS_P5) * 8)
-#define BNXT_RING_STATS_SIZE_P5_SR2 \
+#define BNXT_RING_STATS_SIZE_P7 \
((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
- BNXT_NUM_TPA_RING_STATS_P5_SR2) * 8)
+ BNXT_NUM_TPA_RING_STATS_P7) * 8)
#define BNXT_GET_RING_STATS64(sw, counter) \
(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
@@ -2303,10 +2594,11 @@ static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
static inline void bnxt_db_write_relaxed(struct bnxt *bp,
struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_writeq_relaxed(bp, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
} else {
- u32 db_val = db->db_key32 | idx;
+ u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);
writel_relaxed(db_val, db->doorbell);
if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2318,10 +2610,11 @@ static inline void bnxt_db_write_relaxed(struct bnxt *bp,
static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_writeq(bp, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
} else {
- u32 db_val = db->db_key32 | idx;
+ u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);
writel(db_val, db->doorbell);
if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2351,12 +2644,21 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr);
+void bnxt_fill_ipv6_mask(__be32 mask[4]);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
void bnxt_free_ctx_mem(struct bnxt *bp);
+int bnxt_num_tx_to_cp(struct bnxt *bp, int tx);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
@@ -2366,7 +2668,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- int idx);
+ u16 curr);
void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
@@ -2393,6 +2695,13 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int bnxt_fw_init_one(struct bnxt *bp);
bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
+struct bnxt_ntuple_filter *bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr, u32 idx);
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+ const struct sk_buff *skb);
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+ u32 idx);
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 63e067038385..0dbb880a7aa0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
}
}
if (bp->ieee_ets) {
- int tc = netdev_get_num_tc(bp->dev);
+ int tc = bp->num_tc;
if (!tc)
tc = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 89809f1b129c..ae4529c043f0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -462,8 +462,6 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
}
bnxt_cancel_reservations(bp, false);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
@@ -734,7 +732,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
}
/* earlier devices present as an array of raw bytes */
- if (!BNXT_CHIP_P5(bp)) {
+ if (!BNXT_CHIP_P5_PLUS(bp)) {
dim = 0;
i = 0;
bits *= 3; /* array of 3 version components */
@@ -754,7 +752,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
goto exit;
bnxt_copy_from_nvm_data(&ver, data, bits, bytes);
- if (BNXT_CHIP_P5(bp)) {
+ if (BNXT_CHIP_P5_PLUS(bp)) {
*nvm_cfg_ver <<= 8;
*nvm_cfg_ver |= ver.vu8;
} else {
@@ -774,7 +772,7 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
if (!strlen(buf))
return 0;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
return 0;
@@ -1000,7 +998,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- if (BNXT_CHIP_P5(bp)) {
+ if (BNXT_CHIP_P5_PLUS(bp)) {
rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 5f67a7f94e7d..dc4ca706b0e2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -460,6 +460,7 @@ static const struct {
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
+ BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};
static const struct {
@@ -510,9 +511,9 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
if (BNXT_SUPPORTS_TPA(bp)) {
if (bp->max_tpa_v2) {
- if (BNXT_CHIP_P5_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
return BNXT_NUM_TPA_RING_STATS_P5;
- return BNXT_NUM_TPA_RING_STATS_P5_SR2;
+ return BNXT_NUM_TPA_RING_STATS_P7;
}
return BNXT_NUM_TPA_RING_STATS;
}
@@ -527,7 +528,8 @@ static int bnxt_get_num_ring_stats(struct bnxt *bp)
bnxt_get_num_tpa_ring_stats(bp);
tx = NUM_RING_TX_HW_STATS;
cmn = NUM_RING_CMN_SW_STATS;
- return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ return rx * bp->rx_nr_rings +
+ tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
cmn * bp->cp_nr_rings;
}
@@ -882,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
tx_grps = max(tcs, 1);
if (bp->tx_nr_rings_xdp)
tx_grps++;
@@ -922,6 +924,7 @@ static int bnxt_set_channels(struct net_device *dev,
bool sh = false;
int tx_xdp = 0;
int rc = 0;
+ int tx_cp;
if (channel->other_count)
return -EINVAL;
@@ -941,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -988,8 +991,9 @@ static int bnxt_set_channels(struct net_device *dev,
if (tcs > 1)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
/* After changing number of rx channels, update NTUPLE feature. */
netdev_update_features(dev);
@@ -1007,29 +1011,60 @@ static int bnxt_set_channels(struct net_device *dev,
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
-static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
+ int tbl_size, u32 *ids, u32 start,
+ u32 id_cnt)
{
- int i, j = 0;
+ int i, j = start;
- cmd->data = bp->ntp_fltr_count;
- for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ if (j >= id_cnt)
+ return j;
+ for (i = 0; i < tbl_size; i++) {
struct hlist_head *head;
- struct bnxt_ntuple_filter *fltr;
+ struct bnxt_filter_base *fltr;
- head = &bp->ntp_fltr_hash_tbl[i];
- rcu_read_lock();
+ head = &tbl[i];
hlist_for_each_entry_rcu(fltr, head, hash) {
- if (j == cmd->rule_cnt)
- break;
- rule_locs[j++] = fltr->sw_id;
+ if (!fltr->flags ||
+ test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
+ continue;
+ ids[j++] = fltr->sw_id;
+ if (j == id_cnt)
+ return j;
+ }
+ }
+ return j;
+}
+
+static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
+ struct hlist_head tbl[],
+ int tbl_size, u32 id)
+{
+ int i;
+
+ for (i = 0; i < tbl_size; i++) {
+ struct hlist_head *head;
+ struct bnxt_filter_base *fltr;
+
+ head = &tbl[i];
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (fltr->flags && fltr->sw_id == id)
+ return fltr;
}
- rcu_read_unlock();
- if (j == cmd->rule_cnt)
- break;
}
- cmd->rule_cnt = j;
+ return NULL;
+}
+
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ cmd->data = bp->ntp_fltr_count;
+ rcu_read_lock();
+ cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ rule_locs, 0, cmd->rule_cnt);
+ rcu_read_unlock();
+
return 0;
}
@@ -1037,27 +1072,24 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fs =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
struct flow_keys *fkeys;
- int i, rc = -EINVAL;
+ int rc = -EINVAL;
if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
return rc;
- for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
- struct hlist_head *head;
-
- head = &bp->ntp_fltr_hash_tbl[i];
- rcu_read_lock();
- hlist_for_each_entry_rcu(fltr, head, hash) {
- if (fltr->sw_id == fs->location)
- goto fltr_found;
- }
+ rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ fs->location);
+ if (!fltr_base) {
rcu_read_unlock();
+ return rc;
}
- return rc;
+ fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
-fltr_found:
fkeys = &fltr->fkeys;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
if (fkeys->basic.ip_proto == IPPROTO_TCP)
@@ -1067,20 +1099,23 @@ fltr_found:
else
goto fltr_err;
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
-
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
-
- fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
-
- fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ }
} else {
- int i;
-
if (fkeys->basic.ip_proto == IPPROTO_TCP)
fs->flow_type = TCP_V6_FLOW;
else if (fkeys->basic.ip_proto == IPPROTO_UDP)
@@ -1088,22 +1123,27 @@ fltr_found:
else
goto fltr_err;
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
- fkeys->addrs.v6addrs.src;
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
- fkeys->addrs.v6addrs.dst;
- for (i = 0; i < 4; i++) {
- fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
- fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
- fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
-
- fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
- fs->ring_cookie = fltr->rxq;
+ fs->ring_cookie = fltr->base.rxq;
rc = 0;
fltr_err:
@@ -1111,7 +1151,221 @@ fltr_err:
return rc;
}
-#endif
+
+#define IPV4_ALL_MASK ((__force __be32)~0)
+#define L4_PORT_ALL_MASK ((__force __be16)~0)
+
+static bool ipv6_mask_is_full(__be32 mask[4])
+{
+ return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+}
+
+static bool ipv6_mask_is_zero(__be32 mask[4])
+{
+ return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
+static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ struct bnxt_ntuple_filter *new_fltr, *fltr;
+ struct bnxt_l2_filter *l2_fltr;
+ u32 flow_type = fs->flow_type;
+ struct flow_keys *fkeys;
+ u32 idx;
+ int rc;
+
+ if (!bp->vnic_info)
+ return -EAGAIN;
+
+ if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ return -EOPNOTSUPP;
+
+ new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
+ if (!new_fltr)
+ return -ENOMEM;
+
+ l2_fltr = bp->vnic_info[0].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ new_fltr->l2_fltr = l2_fltr;
+ fkeys = &new_fltr->fkeys;
+
+ rc = -EOPNOTSUPP;
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;
+
+ fkeys->basic.ip_proto = IPPROTO_TCP;
+ if (flow_type == UDP_V4_FLOW)
+ fkeys->basic.ip_proto = IPPROTO_UDP;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+
+ if (ip_mask->ip4src == IPV4_ALL_MASK) {
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+ } else if (ip_mask->ip4src) {
+ goto ntuple_err;
+ }
+ if (ip_mask->ip4dst == IPV4_ALL_MASK) {
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+ } else if (ip_mask->ip4dst) {
+ goto ntuple_err;
+ }
+
+ if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+ fkeys->ports.src = ip_spec->psrc;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+ } else if (ip_mask->psrc) {
+ goto ntuple_err;
+ }
+ if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+ fkeys->ports.dst = ip_spec->pdst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+ } else if (ip_mask->pdst) {
+ goto ntuple_err;
+ }
+ break;
+ }
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW: {
+ struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;
+
+ fkeys->basic.ip_proto = IPPROTO_TCP;
+ if (flow_type == UDP_V6_FLOW)
+ fkeys->basic.ip_proto = IPPROTO_UDP;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+
+ if (ipv6_mask_is_full(ip_mask->ip6src)) {
+ fkeys->addrs.v6addrs.src =
+ *(struct in6_addr *)&ip_spec->ip6src;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+ } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
+ goto ntuple_err;
+ }
+ if (ipv6_mask_is_full(ip_mask->ip6dst)) {
+ fkeys->addrs.v6addrs.dst =
+ *(struct in6_addr *)&ip_spec->ip6dst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+ } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
+ goto ntuple_err;
+ }
+
+ if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+ fkeys->ports.src = ip_spec->psrc;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+ } else if (ip_mask->psrc) {
+ goto ntuple_err;
+ }
+ if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+ fkeys->ports.dst = ip_spec->pdst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+ } else if (ip_mask->pdst) {
+ goto ntuple_err;
+ }
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ goto ntuple_err;
+ }
+ if (!new_fltr->ntuple_flags)
+ goto ntuple_err;
+
+ idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
+ rcu_read_lock();
+ fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+ if (fltr) {
+ rcu_read_unlock();
+ rc = -EEXIST;
+ goto ntuple_err;
+ }
+ rcu_read_unlock();
+
+ new_fltr->base.rxq = ring;
+ new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
+ rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+ if (!rc) {
+ rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
+ if (rc) {
+ bnxt_del_ntp_filter(bp, new_fltr);
+ return rc;
+ }
+ fs->location = new_fltr->base.sw_id;
+ return 0;
+ }
+
+ntuple_err:
+ atomic_dec(&l2_fltr->refcnt);
+ kfree(new_fltr);
+ return rc;
+}
+
+static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ u32 ring, flow_type;
+ int rc;
+ u8 vf;
+
+ if (!netif_running(bp->dev))
+ return -EAGAIN;
+ if (!(bp->flags & BNXT_FLAG_RFS))
+ return -EPERM;
+ if (fs->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ if (BNXT_VF(bp) && vf)
+ return -EINVAL;
+ if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
+ return -EINVAL;
+ if (!vf && ring >= bp->rx_nr_rings)
+ return -EINVAL;
+
+ flow_type = fs->flow_type;
+ if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+ if (flow_type == ETHER_FLOW)
+ rc = -EOPNOTSUPP;
+ else
+ rc = bnxt_add_ntuple_cls_rule(bp, fs);
+ return rc;
+}
+
+static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct bnxt_filter_base *fltr_base;
+ struct bnxt_ntuple_filter *fltr;
+
+ rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ fs->location);
+ if (!fltr_base) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
+ if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
+ bnxt_del_ntp_filter(bp, fltr);
+ return 0;
+}
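
A minimal standalone sketch (not part of the patch) of the mask test bnxt_add_ntuple_cls_rule() applies to each IPv6 field: the field participates in the match only when its ethtool mask is all ones, is ignored when all zeroes, and a partial mask takes the ntuple_err path; uint32_t replaces __be32 here since byte order does not affect the check.

#include <stdio.h>
#include <stdint.h>

#define IPV4_ALL_MASK   ((uint32_t)~0)

static int ipv6_mask_is_full(const uint32_t mask[4])
{
        return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
}

static int ipv6_mask_is_zero(const uint32_t mask[4])
{
        return !(mask[0] | mask[1] | mask[2] | mask[3]);
}

int main(void)
{
        uint32_t full[4]    = { ~0u, ~0u, ~0u, ~0u };   /* field is matched */
        uint32_t zero[4]    = { 0, 0, 0, 0 };           /* field is ignored */
        uint32_t partial[4] = { ~0u, ~0u, 0, 0 };       /* neither: rule is rejected */

        printf("full:    full=%d zero=%d\n", ipv6_mask_is_full(full), ipv6_mask_is_zero(full));
        printf("zero:    full=%d zero=%d\n", ipv6_mask_is_full(zero), ipv6_mask_is_zero(zero));
        printf("partial: full=%d zero=%d\n", ipv6_mask_is_full(partial), ipv6_mask_is_zero(partial));
        return 0;
}
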
static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
@@ -1194,7 +1448,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
} else if (cmd->flow_type == UDP_V4_FLOW) {
- if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
return -EINVAL;
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
if (tuple == 4)
@@ -1204,7 +1458,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
} else if (cmd->flow_type == UDP_V6_FLOW) {
- if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
return -EINVAL;
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
@@ -1244,7 +1498,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg == rss_hash_cfg)
return 0;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
bp->rss_hash_cfg = rss_hash_cfg;
if (netif_running(bp->dev)) {
@@ -1261,14 +1515,13 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int rc = 0;
switch (cmd->cmd) {
-#ifdef CONFIG_RFS_ACCEL
case ETHTOOL_GRXRINGS:
cmd->data = bp->rx_nr_rings;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+ cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1278,7 +1531,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRULE:
rc = bnxt_grxclsrule(bp, cmd);
break;
-#endif
case ETHTOOL_GRXFH:
rc = bnxt_grxfh(bp, cmd);
@@ -1302,6 +1554,14 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
rc = bnxt_srxfh(bp, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ rc = bnxt_srxclsrlins(bp, cmd);
+ break;
+
+ case ETHTOOL_SRXCLSRLDEL:
+ rc = bnxt_srxclsrldel(bp, cmd);
+ break;
+
default:
rc = -EOPNOTSUPP;
break;
@@ -1313,8 +1573,9 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ BNXT_RSS_TABLE_ENTRIES_P5;
return HW_HASH_INDEX_SIZE;
}
@@ -1323,49 +1584,49 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
return HW_HASH_KEY_SIZE;
}
-static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int bnxt_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!bp->vnic_info)
return 0;
vnic = &bp->vnic_info[0];
- if (indir && bp->rss_indir_tbl) {
+ if (rxfh->indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- indir[i] = bp->rss_indir_tbl[i];
+ rxfh->indir[i] = bp->rss_indir_tbl[i];
}
- if (key && vnic->rss_hash_key)
- memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+ if (rxfh->key && vnic->rss_hash_key)
+ memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
return 0;
}
-static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int bnxt_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- if (hfunc && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (key)
+ if (rxfh->key)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- bp->rss_indir_tbl[i] = indir[i];
+ bp->rss_indir_tbl[i] = rxfh->indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
@@ -1568,6 +1829,22 @@ static const enum bnxt_media_type bnxt_phy_types[] = {
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};
static enum bnxt_media_type
@@ -1595,6 +1872,7 @@ enum bnxt_link_speed_indices {
BNXT_LINK_SPEED_50GB_IDX,
BNXT_LINK_SPEED_100GB_IDX,
BNXT_LINK_SPEED_200GB_IDX,
+ BNXT_LINK_SPEED_400GB_IDX,
__BNXT_LINK_SPEED_END
};
@@ -1606,9 +1884,21 @@ static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
- case BNXT_LINK_SPEED_50GB: return BNXT_LINK_SPEED_50GB_IDX;
- case BNXT_LINK_SPEED_100GB: return BNXT_LINK_SPEED_100GB_IDX;
- case BNXT_LINK_SPEED_200GB: return BNXT_LINK_SPEED_200GB_IDX;
+ case BNXT_LINK_SPEED_50GB:
+ case BNXT_LINK_SPEED_50GB_PAM4:
+ return BNXT_LINK_SPEED_50GB_IDX;
+ case BNXT_LINK_SPEED_100GB:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
+ return BNXT_LINK_SPEED_100GB_IDX;
+ case BNXT_LINK_SPEED_200GB:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ return BNXT_LINK_SPEED_200GB_IDX;
+ case BNXT_LINK_SPEED_400GB:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ return BNXT_LINK_SPEED_400GB_IDX;
default: return BNXT_LINK_SPEED_UNKNOWN;
}
}
@@ -1681,6 +1971,12 @@ bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
},
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ },
},
[BNXT_LINK_SPEED_200GB_IDX] = {
[BNXT_SIG_MODE_PAM4] = {
@@ -1689,6 +1985,26 @@ bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
},
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_400GB_IDX] = {
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ },
},
};
@@ -1753,7 +2069,8 @@ static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
lk_ksettings->link_modes.supported);
}
- if (link_info->support_auto_speeds || link_info->support_pam4_auto_speeds)
+ if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
+ link_info->support_pam4_auto_speeds)
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
lk_ksettings->link_modes.supported);
@@ -1789,22 +2106,60 @@ static const u16 bnxt_pam4_speed_masks[] = {
[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_nrz_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
+ [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
+ [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
+ [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_pam4_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
+ [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
+};
+
+static const u16 bnxt_pam4_112_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
+ [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};
static enum bnxt_link_speed_indices
-bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk)
+bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
{
const u16 *speeds;
int idx, len;
switch (sig_mode) {
case BNXT_SIG_MODE_NRZ:
- speeds = bnxt_nrz_speed_masks;
- len = ARRAY_SIZE(bnxt_nrz_speed_masks);
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ speeds = bnxt_nrz_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
+ } else {
+ speeds = bnxt_nrz_speed_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speed_masks);
+ }
break;
case BNXT_SIG_MODE_PAM4:
- speeds = bnxt_pam4_speed_masks;
- len = ARRAY_SIZE(bnxt_pam4_speed_masks);
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ speeds = bnxt_pam4_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
+ } else {
+ speeds = bnxt_pam4_speed_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speed_masks);
+ }
+ break;
+ case BNXT_SIG_MODE_PAM4_112:
+ speeds = bnxt_pam4_112_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
break;
default:
return BNXT_LINK_SPEED_UNKNOWN;
@@ -1822,14 +2177,14 @@ bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk)
static void
__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
- u8 sig_mode, unsigned long *et_mask)
+ u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
enum ethtool_link_mode_bit_indices link_mode;
enum bnxt_link_speed_indices speed;
u8 bit;
for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
- speed = bnxt_encoding_speed_idx(sig_mode, 1 << bit);
+ speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
if (!speed)
continue;
@@ -1843,16 +2198,83 @@ __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
static void
bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
- u8 sig_mode, unsigned long *et_mask)
+ u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
if (media) {
- __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask);
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
+ et_mask);
return;
}
/* list speeds for all media if unknown */
for (media = 1; media < __BNXT_MEDIA_END; media++)
- __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask);
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
+ et_mask);
+}
+
+static void
+bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
+ u16 phy_flags = bp->phy_flags;
+
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ sp_nrz = link_info->support_speeds2;
+ sp_pam4 = link_info->support_speeds2;
+ sp_pam4_112 = link_info->support_speeds2;
+ } else {
+ sp_nrz = link_info->support_speeds;
+ sp_pam4 = link_info->support_pam4_speeds;
+ }
+ bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
+ phy_flags, lk_ksettings->link_modes.supported);
+}
+
+static void
+bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
+ u16 phy_flags = bp->phy_flags;
+
+ sp_nrz = link_info->advertising;
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ sp_pam4 = link_info->advertising;
+ sp_pam4_112 = link_info->advertising;
+ } else {
+ sp_pam4 = link_info->advertising_pam4;
+ }
+ bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
+ phy_flags, lk_ksettings->link_modes.advertising);
+}
+
+static void
+bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 phy_flags = bp->phy_flags;
+
+ bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
+ BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.lp_advertising);
+ bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
+ BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.lp_advertising);
}
static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
@@ -1881,22 +2303,42 @@ static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
const unsigned long *et_mask)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
enum bnxt_media_type media = bnxt_get_media(link_info);
+ u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
+ u32 delta_pam4_112 = 0;
u32 delta_pam4 = 0;
u32 delta_nrz = 0;
int i, m;
+ adv = &link_info->advertising;
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ adv_pam4 = &link_info->advertising;
+ adv_pam4_112 = &link_info->advertising;
+ sp_msks = bnxt_nrz_speeds2_masks;
+ sp_pam4_msks = bnxt_pam4_speeds2_masks;
+ sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
+ } else {
+ adv_pam4 = &link_info->advertising_pam4;
+ sp_msks = bnxt_nrz_speed_masks;
+ sp_pam4_msks = bnxt_pam4_speed_masks;
+ }
for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
/* accept any legal media from user */
for (m = 1; m < __BNXT_MEDIA_END; m++) {
bnxt_update_speed(&delta_nrz, m == media,
- &link_info->advertising,
- bnxt_nrz_speed_masks[i], et_mask,
+ adv, sp_msks[i], et_mask,
bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
bnxt_update_speed(&delta_pam4, m == media,
- &link_info->advertising_pam4,
- bnxt_pam4_speed_masks[i], et_mask,
+ adv_pam4, sp_pam4_msks[i], et_mask,
bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
+ if (!adv_pam4_112)
+ continue;
+
+ bnxt_update_speed(&delta_pam4_112, m == media,
+ adv_pam4_112, sp_pam4_112_msks[i], et_mask,
+ bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
}
}
}
@@ -1961,11 +2403,20 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
case BNXT_LINK_SPEED_40GB:
return SPEED_40000;
case BNXT_LINK_SPEED_50GB:
+ case BNXT_LINK_SPEED_50GB_PAM4:
return SPEED_50000;
case BNXT_LINK_SPEED_100GB:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
return SPEED_100000;
case BNXT_LINK_SPEED_200GB:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
return SPEED_200000;
+ case BNXT_LINK_SPEED_400GB:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ return SPEED_400000;
default:
return SPEED_UNKNOWN;
}
@@ -1981,6 +2432,7 @@ static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
base->duplex = DUPLEX_HALF;
if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
base->duplex = DUPLEX_FULL;
+ lk_ksettings->lanes = link_info->active_lanes;
} else if (!link_info->autoneg) {
base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
base->duplex = DUPLEX_HALF;
@@ -2008,12 +2460,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
mutex_lock(&bp->link_lock);
bnxt_get_ethtool_modes(link_info, lk_ksettings);
media = bnxt_get_media(link_info);
- bnxt_get_ethtool_speeds(link_info->support_speeds,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.supported);
- bnxt_get_ethtool_speeds(link_info->support_pam4_speeds,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.supported);
+ bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
link_mode = bnxt_get_link_mode(link_info);
if (link_mode != BNXT_LINK_MODE_UNKNOWN)
@@ -2026,20 +2473,10 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
lk_ksettings->link_modes.advertising);
base->autoneg = AUTONEG_ENABLE;
- bnxt_get_ethtool_speeds(link_info->advertising,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.advertising);
- bnxt_get_ethtool_speeds(link_info->advertising_pam4,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.advertising);
- if (link_info->phy_link_status == BNXT_LINK_LINK) {
- bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.lp_advertising);
- bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.lp_advertising);
- }
+ bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ bnxt_get_all_ethtool_lp_speeds(link_info, media,
+ lk_ksettings);
} else {
base->autoneg = AUTONEG_DISABLE;
}
@@ -2074,6 +2511,7 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
u16 support_pam4_spds = link_info->support_pam4_speeds;
+ u16 support_spds2 = link_info->support_speeds2;
u16 support_spds = link_info->support_speeds;
u8 sig_mode = BNXT_SIG_MODE_NRZ;
u32 lanes_needed = 1;
@@ -2085,7 +2523,8 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
break;
case SPEED_1000:
- if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
break;
case SPEED_2500:
@@ -2093,7 +2532,8 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
break;
case SPEED_10000:
- if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
break;
case SPEED_20000:
@@ -2103,26 +2543,34 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
}
break;
case SPEED_25000:
- if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
break;
case SPEED_40000:
- if (support_spds & BNXT_LINK_SPEED_MSK_40GB) {
+ if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
lanes_needed = 4;
}
break;
case SPEED_50000:
- if ((support_spds & BNXT_LINK_SPEED_MSK_50GB) && lanes != 1) {
+ if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
+ lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
lanes_needed = 2;
} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
sig_mode = BNXT_SIG_MODE_PAM4;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
+ fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
}
break;
case SPEED_100000:
- if ((support_spds & BNXT_LINK_SPEED_MSK_100GB) &&
+ if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
lanes != 2 && lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
lanes_needed = 4;
@@ -2130,6 +2578,14 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
sig_mode = BNXT_SIG_MODE_PAM4;
lanes_needed = 2;
+ } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
+ lanes != 1) {
+ fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 2;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
}
break;
case SPEED_200000:
@@ -2137,6 +2593,27 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
sig_mode = BNXT_SIG_MODE_PAM4;
lanes_needed = 4;
+ } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
+ lanes != 2) {
+ fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 4;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
+ lanes_needed = 2;
+ }
+ break;
+ case SPEED_400000:
+ if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
+ lanes != 4) {
+ fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 8;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
+ lanes_needed = 4;
}
break;
}
@@ -3910,7 +4387,8 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
+ TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
raw_cons = NEXT_RAW_CMP(raw_cons);
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -3934,8 +4412,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
int rc;
cpr = &rxr->bnapi->cp_ring;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ cpr = rxr->rx_cpr;
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index d5fad5a3cdd1..e957abd704db 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -40,6 +40,8 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
@@ -196,6 +198,9 @@ struct cmd_nums {
#define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
#define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
#define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -214,6 +219,7 @@ struct cmd_nums {
#define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
#define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
#define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
#define HWRM_STAT_CTX_ENG_QUERY 0xafUL
#define HWRM_STAT_CTX_ALLOC 0xb0UL
#define HWRM_STAT_CTX_FREE 0xb1UL
@@ -261,6 +267,7 @@ struct cmd_nums {
#define HWRM_PORT_EP_TX_CFG 0xdbUL
#define HWRM_PORT_CFG 0xdcUL
#define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
@@ -392,6 +399,10 @@ struct cmd_nums {
#define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
#define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
#define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -406,9 +417,9 @@ struct cmd_nums {
#define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
#define HWRM_MFG_SOC_IMAGE 0x20cUL
#define HWRM_MFG_SOC_QSTATUS 0x20dUL
- #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
- #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
#define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
#define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
#define HWRM_MFG_PRVSN_GET_STATE 0x213UL
@@ -418,6 +429,16 @@ struct cmd_nums {
#define HWRM_MFG_SELFTEST_EXEC 0x217UL
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -582,9 +603,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 171
-#define HWRM_VERSION_STR "1.10.2.171"
+#define HWRM_VERSION_UPDATE 3
+#define HWRM_VERSION_RSVD 15
+#define HWRM_VERSION_STR "1.10.3.15"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -816,7 +837,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1632,7 +1654,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:896b/112B) */
+/* hwrm_func_qcaps_output (size:1088b/136B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1736,21 +1758,29 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
__le16 max_key_ctxs_alloc;
__le32 flags_ext2;
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1760,15 +1790,21 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
- u8 key_xid_partition_cap;
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_TKC 0x1UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_RKC 0x2UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_TKC 0x4UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_RKC 0x8UL
- u8 unused_1;
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
- u8 unused_2[5];
+ u8 unused_2[2];
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ u8 unused_3[3];
u8 valid;
};
@@ -1783,7 +1819,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:1024b/128B) */
+/* hwrm_func_qcfg_output (size:1280b/160B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1892,7 +1928,7 @@ struct hwrm_func_qcfg_output {
__le16 alloc_msix;
__le16 registered_vfs;
__le16 l2_doorbell_bar_size_kb;
- u8 unused_1;
+ u8 active_endpoints;
u8 always_1;
__le32 reset_addr_poll;
__le16 legacy_l2_db_size_kb;
@@ -1952,15 +1988,26 @@ struct hwrm_func_qcfg_output {
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
u8 unused_5[2];
- __le32 alloc_tx_key_ctxs;
- __le32 alloc_rx_key_ctxs;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
u8 lag_id;
u8 parif;
- u8 unused_6[5];
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ u8 unused_7;
u8 valid;
};
-/* hwrm_func_cfg_input (size:1088b/136B) */
+/* hwrm_func_cfg_input (size:1280b/160B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1996,7 +2043,6 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
- #define FUNC_CFG_REQ_FLAGS_KEY_CTX_ASSETS_TEST 0x80000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2028,8 +2074,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
#define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
#define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
- #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS 0x40000000UL
- #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS 0x80000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
__le16 admin_mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -2139,10 +2185,21 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__be16 tpid;
__le16 host_mtu;
- u8 unused_0[4];
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
__le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
- #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
u8 port_kdnet_mode;
#define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
@@ -2165,7 +2222,18 @@ struct hwrm_func_cfg_input {
__le32 num_ktls_rx_key_ctxs;
__le32 num_quic_tx_key_ctxs;
__le32 num_quic_rx_key_ctxs;
- __le32 unused_2;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL
+ __le16 unused_2;
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2604,7 +2672,7 @@ struct hwrm_func_vf_resource_cfg_input {
__le32 max_quic_rx_key_ctxs;
};
-/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
struct hwrm_func_vf_resource_cfg_output {
__le16 error_code;
__le16 req_type;
@@ -2618,8 +2686,10 @@ struct hwrm_func_vf_resource_cfg_output {
__le16 reserved_vnics;
__le16 reserved_stat_ctx;
__le16 reserved_hw_ring_grps;
- __le32 reserved_tx_key_ctxs;
- __le32 reserved_rx_key_ctxs;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
u8 unused_0[7];
u8 valid;
};
@@ -3441,7 +3511,8 @@ struct hwrm_func_ptp_cfg_input {
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
u8 unused_0[3];
__le32 ptp_freq_adj_ext_period;
__le32 ptp_freq_adj_ext_up;
@@ -3627,28 +3698,28 @@ struct hwrm_func_backing_store_qcfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
u8 rsvd[4];
};
@@ -3744,6 +3815,15 @@ struct mrav_split_entries {
__le32 rsvd2[2];
};
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 rsvd;
+ __le32 rsvd2[2];
+};
+
/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
struct hwrm_func_backing_store_qcaps_v2_input {
__le16 req_type;
@@ -3761,8 +3841,8 @@ struct hwrm_func_backing_store_qcaps_v2_input {
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
@@ -3793,8 +3873,8 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
@@ -3838,56 +3918,55 @@ struct hwrm_func_backing_store_qcaps_v2_output {
/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
struct hwrm_func_dbr_pacing_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
struct hwrm_func_dbr_pacing_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
-#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
- u8 unused_0[7];
- __le32 dbr_stat_db_fifo_reg;
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST \
- FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
- __le32 dbr_stat_db_fifo_reg_watermark_mask;
- u8 dbr_stat_db_fifo_reg_watermark_shift;
- u8 unused_1[3];
- __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
- u8 dbr_stat_db_fifo_reg_fifo_room_shift;
- u8 unused_2[3];
- __le32 dbr_throttling_aeq_arm_reg;
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST \
- FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
- u8 dbr_throttling_aeq_arm_reg_val;
- u8 unused_3[7];
- __le32 primary_nq_id;
- __le32 pacing_threshold;
- u8 unused_4[7];
- u8 valid;
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
};
/* hwrm_func_drv_if_change_input (size:192b/24B) */
@@ -3915,7 +3994,7 @@ struct hwrm_func_drv_if_change_output {
u8 valid;
};
-/* hwrm_port_phy_cfg_input (size:448b/56B) */
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3960,6 +4039,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -3990,7 +4071,9 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
__le16 auto_link_speed;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
@@ -4054,7 +4137,36 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
- u8 unused_2[2];
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ u8 unused_2[6];
};
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -4104,7 +4216,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
@@ -4127,6 +4240,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -4270,7 +4384,23 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -4366,6 +4496,7 @@ struct hwrm_port_phy_qcfg_output {
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le16 support_pam4_speeds;
@@ -4387,7 +4518,53 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
u8 link_down_reason;
#define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
- u8 unused_0[7];
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
u8 valid;
};
@@ -4426,6 +4603,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -4459,7 +4637,12 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
__le32 ptp_freq_adj_ppb;
- u8 unused_1[4];
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
__le64 ptp_adj_phase;
};
@@ -4504,6 +4687,7 @@ struct hwrm_port_mac_ptp_qcfg_output {
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -4968,7 +5152,7 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:256b/32B) */
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -5051,7 +5235,40 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
u8 valid;
};
@@ -5472,6 +5689,30 @@ struct hwrm_port_led_qcaps_output {
u8 valid;
};
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
__le16 req_type;
@@ -7488,7 +7729,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
__le16 dst_id;
- __le16 mirror_vnic_id;
+ __le16 rfs_ring_tbl_idx;
u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
@@ -8201,6 +8442,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
u8 unused_0[3];
u8 valid;
};
@@ -8223,7 +8465,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
u8 unused_0[6];
};
@@ -8245,7 +8488,10 @@ struct hwrm_tunnel_dst_port_query_output {
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 unused_0[2];
+ u8 status;
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
+ u8 unused_0;
u8 valid;
};
@@ -8267,7 +8513,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
__be16 tunnel_dst_port_val;
u8 unused_0[4];
@@ -8284,7 +8531,8 @@ struct hwrm_tunnel_dst_port_alloc_output {
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
u8 upar_in_use;
#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
@@ -8316,7 +8564,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
__le16 tunnel_dst_port_id;
u8 unused_0[4];
@@ -9717,7 +9966,7 @@ struct hwrm_nvm_get_dev_info_input {
__le64 resp_addr;
};
-/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
+/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -9747,6 +9996,10 @@ struct hwrm_nvm_get_dev_info_output {
__le16 roce_fw_minor;
__le16 roce_fw_build;
__le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
u8 unused_0[7];
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 6e3da3362bd6..cc07660330f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -129,7 +129,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
}
resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (!rc)
*ts = le64_to_cpu(resp->ptp_msg_ts);
hwrm_req_drop(bp, req);
@@ -319,15 +319,17 @@ static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
return hwrm_req_send(bp, req);
}
-void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct hwrm_port_mac_cfg_input *req;
+ int rc;
if (!ptp || !ptp->tstamp_filters)
- return;
+ return -EIO;
- if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG))
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
goto out;
if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
@@ -342,15 +344,17 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
- if (!hwrm_req_send(bp, req)) {
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters &
PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE);
- return;
+ return 0;
}
ptp->tstamp_filters = 0;
out:
bp->ptp_all_rx_tstamp = 0;
netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n");
+ return rc;
}
void bnxt_ptp_reapply_pps(struct bnxt *bp)
@@ -494,7 +498,6 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 flags = 0;
- int rc = 0;
switch (ptp->rx_filter) {
case HWTSTAMP_FILTER_ALL:
@@ -519,18 +522,7 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
ptp->tstamp_filters = flags;
- if (netif_running(bp->dev)) {
- if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
- bnxt_close_nic(bp, false, false);
- rc = bnxt_open_nic(bp, false, false);
- } else {
- bnxt_ptp_cfg_tstamp_filters(bp);
- }
- if (!rc && !ptp->tstamp_filters)
- rc = -EIO;
- }
-
- return rc;
+ return bnxt_ptp_cfg_tstamp_filters(bp);
}
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -649,7 +641,7 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
int rc, i;
reg_arr = ptp->refclk_regs;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN);
if (rc)
return rc;
@@ -692,8 +684,8 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(ptp->tx_skb, &timestamp);
} else {
- netdev_err(bp->dev, "TS query for TX timer failed rc = %x\n",
- rc);
+ netdev_warn_once(bp->dev,
+ "TS query for TX timer failed rc = %x\n", rc);
}
dev_kfree_skb_any(ptp->tx_skb);
@@ -966,7 +958,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
rc = err;
goto out;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
spin_lock_bh(&ptp->ptp_lock);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 34162e07a119..fce8dc39a7d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -137,7 +137,7 @@ do { \
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
-void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
+int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c722b3b41730..175192ebaa77 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -536,7 +536,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
vf_ring_grps = 0;
} else {
@@ -565,7 +565,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->min_l2_ctxs = cpu_to_le16(min);
req->min_vnics = cpu_to_le16(min);
req->min_stat_ctx = cpu_to_le16(min);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
req->min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
@@ -602,7 +602,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req->max_rsscos_ctx = cpu_to_le16(vf_rss);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
req->max_msix = cpu_to_le16(vf_msix / num_vfs);
hwrm_req_hold(bp, req);
@@ -630,7 +630,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
le16_to_cpu(req->min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 6ba2b9398633..93f9bd55020f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -42,13 +42,10 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- ent[i].db_offset = DB_PF_OFFSET_P5;
- if (BNXT_VF(bp))
- ent[i].db_offset = DB_VF_OFFSET_P5;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ ent[i].db_offset = bp->db_offset;
+ else
ent[i].db_offset = (idx + i) * 0x80;
- }
}
}
@@ -333,6 +330,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->pdev = bp->pdev;
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
+ edev->l2_db_offset = bp->db_offset;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 6ff77f082e6c..b9e73de14b57 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -73,6 +73,10 @@ struct bnxt_en_dev {
* bytes mapped as non-
* cacheable.
*/
+ int l2_db_offset; /* Doorbell offset in
+ * bytes within
+ * l2_db_size_nc.
+ */
u16 chip_num;
u16 hw_ring_stats_size;
u16 pf_port_id;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 8cb9a99154aa..4079538bc310 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -42,17 +42,17 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
/* fill up the first buffer */
prod = txr->tx_prod;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->nr_frags = num_frags;
if (xdp)
tx_buf->page = virt_to_head_page(xdp->data);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
flags = (len << TX_BD_LEN_SHIFT) |
((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
- txbd->tx_bd_opaque = prod;
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
/* now let us fill up the frags into the next buffers */
@@ -66,10 +66,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
WRITE_ONCE(txr->tx_prod, prod);
/* first fill up the first buffer */
- frag_tx_buf = &txr->tx_buf_ring[prod];
+ frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
frag_tx_buf->page = skb_frag_page(frag);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag);
flags = frag_len << TX_BD_LEN_SHIFT;
@@ -120,20 +120,20 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ u16 tx_hw_cons = txr->tx_hw_cons;
bool rx_doorbell_needed = false;
- int nr_pkts = bnapi->tx_pkts;
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i, j, frags;
+ int j, frags;
if (!budget)
return;
- for (i = 0; i < nr_pkts; i++) {
- tx_buf = &txr->tx_buf_ring[tx_cons];
+ while (RING_TX(bp, tx_cons) != tx_hw_cons) {
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
if (tx_buf->action == XDP_REDIRECT) {
struct pci_dev *pdev = bp->pdev;
@@ -153,20 +153,20 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
frags = tx_buf->nr_frags;
for (j = 0; j < frags; j++) {
tx_cons = NEXT_TX(tx_cons);
- tx_buf = &txr->tx_buf_ring[tx_cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
}
} else {
- bnxt_sched_reset_txr(bp, txr, i);
+ bnxt_sched_reset_txr(bp, txr, tx_cons);
return;
}
tx_cons = NEXT_TX(tx_cons);
}
- bnapi->tx_pkts = 0;
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
WRITE_ONCE(txr->tx_cons, tx_cons);
if (rx_doorbell_needed) {
- tx_buf = &txr->tx_buf_ring[last_tx_cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
}
@@ -242,7 +242,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
pdev = bp->pdev;
offset = bp->rx_offset;
- txr = rxr->bnapi->tx_ring;
+ txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp.data;
@@ -268,7 +268,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
case XDP_TX:
rx_buf = &rxr->rx_buf_ring[cons];
mapping = rx_buf->mapping - bp->rx_dma_offset;
- *event = 0;
+ *event &= BNXT_TX_CMP_EVENT;
if (unlikely(xdp_buff_has_frags(&xdp))) {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
@@ -391,7 +391,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
- int tx_xdp = 0, rc, tc;
+ int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
if (prog && !prog->aux->xdp_has_frags &&
@@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog)
tx_xdp = bp->rx_nr_rings;
- tc = netdev_get_num_tc(dev);
+ tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
@@ -439,7 +439,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
- bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f52830dfb26a..04964bbe08cf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12745,24 +12745,23 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
{
struct tg3 *tp = netdev_priv(dev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- indir[i] = tp->rss_ind_tbl[i];
+ rxfh->indir[i] = tp->rss_ind_tbl[i];
return 0;
}
-static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
@@ -12770,15 +12769,16 @@ static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- tp->rss_ind_tbl[i] = indir[i];
+ tp->rss_ind_tbl[i] = rxfh->indir[i];
if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
return 0;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 31191b520b58..c32174484a96 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1091,10 +1091,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
*/
static void
-bnad_tx_cleanup(struct delayed_work *work)
+bnad_tx_cleanup(struct work_struct *work)
{
struct bnad_tx_info *tx_info =
- container_of(work, struct bnad_tx_info, tx_cleanup_work);
+ container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
struct bnad *bnad = NULL;
struct bna_tcb *tcb;
unsigned long flags;
@@ -1170,7 +1170,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
* Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
*/
static void
-bnad_rx_cleanup(void *work)
+bnad_rx_cleanup(struct work_struct *work)
{
struct bnad_rx_info *rx_info =
container_of(work, struct bnad_rx_info, rx_cleanup_work);
@@ -1991,8 +1991,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
}
tx_info->tx = tx;
- INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
- (work_func_t)bnad_tx_cleanup);
+ INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
/* Register ISR for the Tx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2248,8 +2247,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
rx_info->rx = rx;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- INIT_WORK(&rx_info->rx_cleanup_work,
- (work_func_t)(bnad_rx_cleanup));
+ INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
/*
* Init NAPI, so that state is set to NAPI_STATE_SCHED,
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index df10edff5603..d1ad6c9f8140 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -608,7 +608,7 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN));
- ethtool_sprintf(&string, bnad_net_stats_strings[i]);
+ ethtool_puts(&string, bnad_net_stats_strings[i]);
}
bmap = bna_tx_rid_mask(&bnad->bna);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 78c972bb1d96..aa5700ac9c00 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1165,9 +1165,10 @@ struct macb_ptp_info {
int (*get_ts_info)(struct net_device *dev,
struct ethtool_ts_info *info);
int (*get_hwtst)(struct net_device *netdev,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *tstamp_config);
int (*set_hwtst)(struct net_device *netdev,
- struct ifreq *ifr, int cmd);
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack);
};
struct macb_pm_data {
@@ -1314,7 +1315,7 @@ struct macb {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct tsu_incr tsu_incr;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
/* RX queue filer rule set*/
struct ethtool_rx_fs_list rx_fs_list;
@@ -1363,8 +1364,12 @@ static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, stru
gem_ptp_rxstamp(bp, skb, desc);
}
-int gem_get_hwtst(struct net_device *dev, struct ifreq *rq);
-int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+int gem_get_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config);
+int gem_set_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack);
#else
static inline void gem_ptp_init(struct net_device *ndev) { }
static inline void gem_ptp_remove(struct net_device *ndev) { }
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index cebae0f418f2..898debfd4db3 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3773,18 +3773,38 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (bp->ptp_info) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bp->ptp_info->set_hwtst(dev, rq, cmd);
- case SIOCGHWTSTAMP:
- return bp->ptp_info->get_hwtst(dev, rq);
- }
- }
-
return phylink_mii_ioctl(bp->phylink, rq, cmd);
}
+static int macb_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!bp->ptp_info)
+ return -EOPNOTSUPP;
+
+ return bp->ptp_info->get_hwtst(dev, cfg);
+}
+
+static int macb_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!bp->ptp_info)
+ return -EOPNOTSUPP;
+
+ return bp->ptp_info->set_hwtst(dev, cfg, extack);
+}
+
static inline void macb_set_txcsum_feature(struct macb *bp,
netdev_features_t features)
{
@@ -3884,6 +3904,8 @@ static const struct net_device_ops macb_netdev_ops = {
#endif
.ndo_set_features = macb_set_features,
.ndo_features_check = macb_features_check,
+ .ndo_hwtstamp_set = macb_hwtstamp_set,
+ .ndo_hwtstamp_get = macb_hwtstamp_get,
};
/* Configure peripheral capabilities according to device tree
@@ -4539,6 +4561,8 @@ static const struct net_device_ops at91ether_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = at91ether_poll_controller,
#endif
+ .ndo_hwtstamp_set = macb_hwtstamp_set,
+ .ndo_hwtstamp_get = macb_hwtstamp_get,
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 51d26fa190d7..a63bf29c4fa8 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -374,19 +374,16 @@ static int gem_ptp_set_ts_mode(struct macb *bp,
return 0;
}
-int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
+int gem_get_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config)
{
- struct hwtstamp_config *tstamp_config;
struct macb *bp = netdev_priv(dev);
- tstamp_config = &bp->tstamp_config;
+ *tstamp_config = bp->tstamp_config;
if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
return -EOPNOTSUPP;
- if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
- return -EFAULT;
- else
- return 0;
+ return 0;
}
static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
@@ -401,22 +398,18 @@ static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
}
-int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
+int gem_set_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack)
{
enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
- struct hwtstamp_config *tstamp_config;
struct macb *bp = netdev_priv(dev);
u32 regval;
- tstamp_config = &bp->tstamp_config;
if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
return -EOPNOTSUPP;
- if (copy_from_user(tstamp_config, ifr->ifr_data,
- sizeof(*tstamp_config)))
- return -EFAULT;
-
switch (tstamp_config->tx_type) {
case HWTSTAMP_TX_OFF:
break;
@@ -463,12 +456,11 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ERANGE;
}
+ bp->tstamp_config = *tstamp_config;
+
if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
return -ERANGE;
- if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
- return -EFAULT;
- else
- return 0;
+ return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 068ed52b66c9..b3c81a2e9d46 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1490,7 +1490,7 @@ int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
- mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
+ mbox_cmd.fn = cn23xx_get_vf_stats_callback;
ctx.stats = stats;
atomic_set(&ctx.status, 0);
mbox_cmd.fn_arg = (void *)&ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index dd5d80fee24f..d2fcb3da484e 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -429,7 +429,7 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
mbox_cmd.q_no = 0;
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
- mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
+ mbox_cmd.fn = octeon_pfvf_hs_callback;
mbox_cmd.fn_arg = &status;
octeon_mbox_write(oct, &mbox_cmd);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 9cc6303c82ff..f38d31bfab1b 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -27,6 +27,7 @@
#include "octeon_network.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
MODULE_LICENSE("GPL");
/* OOM task polling interval */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index d92bd7e16477..9ac85d22c615 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -57,7 +57,10 @@ union octeon_mbox_message {
} s;
};
-typedef void (*octeon_mbox_callback_t)(void *, void *, void *);
+struct octeon_mbox_cmd;
+
+typedef void (*octeon_mbox_callback_t)(struct octeon_device *,
+ struct octeon_mbox_cmd *, void *);
struct octeon_mbox_cmd {
union octeon_mbox_message msg;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d8d71bf97983..34125b8cd935 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -653,35 +653,36 @@ static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
return nic->rss_info.rss_size;
}
-static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int nicvf_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
- if (indir) {
+ if (rxfh->indir) {
for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
+ rxfh->indir[idx] = rss->ind_tbl[idx];
}
- if (hkey)
- memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int nicvf_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!rss->enable) {
@@ -690,13 +691,13 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
- if (indir) {
+ if (rxfh->indir) {
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss->ind_tbl[idx] = rxfh->indir[idx];
}
- if (hkey) {
- memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+ if (rxfh->key) {
+ memcpy(rss->key, rxfh->key, RSS_HASH_KEY_SIZE * sizeof(u64));
nicvf_set_rss_key(nic);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index 6d682b7c7aac..9d11e55981a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -237,7 +237,7 @@ struct adapter {
int msix_nvectors;
struct {
unsigned short vec;
- char desc[22];
+ char desc[IFNAMSIZ + 1 + 12]; /* Needs space for "%s-%d" */
} msix_info[SGE_QSETS + 1];
/* T3 modules */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index d117022d15d7..2236f1d35f2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -380,19 +380,18 @@ static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
*/
static void name_msix_vecs(struct adapter *adap)
{
- int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
+ int i, j, msi_idx = 1;
- snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
- adap->msix_info[0].desc[n] = 0;
+ strscpy(adap->msix_info[0].desc, adap->name, sizeof(adap->msix_info[0].desc));
for_each_port(adap, j) {
struct net_device *d = adap->port[j];
const struct port_info *pi = netdev_priv(d);
for (i = 0; i < pi->nqsets; i++, msi_idx++) {
- snprintf(adap->msix_info[msi_idx].desc, n,
+ snprintf(adap->msix_info[msi_idx].desc,
+ sizeof(adap->msix_info[0].desc),
"%s-%d", d->name, pi->first_qset + i);
- adap->msix_info[msi_idx].desc[n] = 0;
}
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 8477a93cee6b..47eecde36285 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1588,22 +1588,23 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
-static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
+static int get_rss_table(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!p)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
while (n--)
- p[n] = pi->rss[n];
+ rxfh->indir[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
- const u8 hfunc)
+static int set_rss_table(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
@@ -1611,16 +1612,17 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!p)
+ if (!rxfh->indir)
return 0;
/* Interface must be brought up atleast once */
if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
for (i = 0; i < pi->rss_size; i++)
- pi->rss[i] = p[i];
+ pi->rss[i] = rxfh->indir[i];
return cxgb4_write_rss(pi, pi->rss);
}
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 1c2a540db13d..1f495cfd7959 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
module_platform_driver(ep93xx_eth_driver);
+MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 08b7cc0a1809..241906697019 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -568,31 +568,32 @@ static u32 enic_get_rxfh_key_size(struct net_device *netdev)
return ENIC_RSS_LEN;
}
-static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int enic_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct enic *enic = netdev_priv(netdev);
- if (hkey)
- memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
+ if (rxfh->key)
+ memcpy(rxfh->key, enic->rss_key, ENIC_RSS_LEN);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int enic_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
- indir)
+ if (rxfh->indir ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
- if (hkey)
- memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
+ if (rxfh->key)
+ memcpy(enic->rss_key, rxfh->key, ENIC_RSS_LEN);
return __enic_set_rsskey(enic);
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 20fcb20b42ed..66b577835338 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
tlv->type = htons(type);
tlv->length = htons(length);
- memcpy(tlv->value, value, length);
+ unsafe_memcpy(tlv->value, value, length,
+ /* Flexible array of flexible arrays */);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
vp->length = htonl(ntohl(vp->length) +
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 78287cfcbf63..705c3eb19cd3 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -79,8 +79,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM)
/**
* struct gmac_queue_page - page buffer per-page info
@@ -1143,23 +1142,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
- unsigned short mtu;
void *buffer;
int ret;
- mtu = ETH_HLEN;
- mtu += netdev->mtu;
- if (skb->protocol == htons(ETH_P_8021Q))
- mtu += VLAN_HLEN;
-
+ /* TODO: implement proper TSO using MTU in word3 */
word1 = skb->len;
word3 = SOF_BIT;
- if (word1 > mtu) {
- word1 |= TSS_MTU_ENABLE_BIT;
- word3 |= mtu;
- }
-
if (skb->len >= ETH_FRAME_LEN) {
/* Hardware offloaded checksumming isn't working on frames
* bigger than 1514 bytes. A hypothesis about this is that the
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index db6615aa921b..7bfeae04b52b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -565,8 +565,7 @@ static void rio_hw_init(struct net_device *dev)
* too. However, it doesn't work on IP1000A so we use 16-bit access.
*/
for (i = 0; i < 3; i++)
- dw16(StationAddr0 + 2 * i,
- cpu_to_le16(((const u16 *)dev->dev_addr)[i]));
+ dw16(StationAddr0 + 2 * i, get_unaligned_le16(&dev->dev_addr[2 * i]));
set_multicast (dev);
if (np->coalesce) {
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index a29de29bdf23..f001a649f58f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1271,43 +1271,45 @@ static u32 be_get_rxfh_key_size(struct net_device *netdev)
return RSS_HASH_KEY_LEN;
}
-static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int be_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i;
struct rss_info *rss = &adapter->rss_info;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
- indir[i] = rss->rss_queue[i];
+ rxfh->indir[i] = rss->rss_queue[i];
}
- if (hkey)
- memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->rss_hkey, RSS_HASH_KEY_LEN);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int be_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
int rc = 0, i, j;
struct be_adapter *adapter = netdev_priv(netdev);
+ u8 *hkey = rxfh->key;
u8 rsstable[RSS_INDIR_TABLE_LEN];
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
struct be_rx_obj *rxo;
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
- j = indir[i];
+ j = rxfh->indir[i];
rxo = &adapter->rx_obj[j];
rsstable[i] = rxo->rss_id;
adapter->rss_info.rss_queue[i] = j;
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index df40c720e7b2..64eadd320798 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -719,17 +719,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
struct xdp_buff *xdp,
- struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx,
+ bool zc)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
bool xmit;
+ u32 type;
if (unlikely(!xdpf))
return false;
+ /* no page pool for zero copy */
+ if (zc)
+ type = TSNEP_TX_TYPE_XDP_NDO;
+ else
+ type = TSNEP_TX_TYPE_XDP_TX;
+
__netif_tx_lock(tx_nq, smp_processor_id());
- xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
/* Avoid transmit queue timeout since we share it with the slow path */
if (xmit)
@@ -1273,7 +1281,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
case XDP_PASS:
return false;
case XDP_TX:
- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
@@ -1323,7 +1331,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
case XDP_PASS:
return false;
case XDP_TX:
- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
@@ -1485,7 +1493,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
xdp_prepare_buff(&xdp, page_address(entry->page),
XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
- length, false);
+ length - ETH_FCS_LEN, false);
consume = tsnep_xdp_run_prog(rx, prog, &xdp,
&xdp_status, tx_nq, tx);
@@ -1568,7 +1576,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
prefetch(entry->xdp->data);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
- xsk_buff_set_size(entry->xdp, length);
+ xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
/* RX metadata with timestamps is in front of actual data,
@@ -1762,6 +1770,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
allocated--;
}
}
+
+ /* set need wakeup flag immediately if ring is not filled completely,
+ * first polling would be too late as need wakeup signalisation would
+ * be delayed for an indefinite time
+ */
+ if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+ int desc_available = tsnep_rx_desc_available(rx);
+
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ }
}
static bool tsnep_pending(struct tsnep_queue *queue)
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 4d7184d46824..9ebe751c1df0 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -633,7 +633,7 @@ out_netdev:
return err;
}
-static s32 nps_enet_remove(struct platform_device *pdev)
+static void nps_enet_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct nps_enet_priv *priv = netdev_priv(ndev);
@@ -641,8 +641,6 @@ static s32 nps_enet_remove(struct platform_device *pdev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id nps_enet_dt_ids[] = {
@@ -653,7 +651,7 @@ MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
- .remove = nps_enet_remove,
+ .remove_new = nps_enet_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = nps_enet_dt_ids,
@@ -663,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
module_platform_driver(nps_enet_driver);
MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index e01a246124ac..f3543a2df68d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -289,7 +289,7 @@ static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
int err;
if (port_priv->vlans[vid]) {
- netdev_warn(netdev, "VLAN %d already configured\n", vid);
+ netdev_err(netdev, "VLAN %d already configured\n", vid);
return -EEXIST;
}
@@ -1509,9 +1509,9 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
struct device *dev = (struct device *)arg;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
struct ethsw_port_priv *port_priv;
- u32 status = ~0;
int err, if_id;
bool had_mac;
+ u32 status;
err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, &status);
@@ -1523,12 +1523,11 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
if_id = (status & 0xFFFF0000) >> 16;
port_priv = ethsw->ports[if_id];
- if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
dpaa2_switch_port_link_state_update(port_priv->netdev);
- dpaa2_switch_port_set_mac_addr(port_priv);
- }
if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
+ dpaa2_switch_port_set_mac_addr(port_priv);
/* We can avoid locking because the "endpoint changed" IRQ
* handler is the only one who changes priv->mac at runtime,
* so we are not racing with anyone.
@@ -1540,20 +1539,20 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
dpaa2_switch_port_connect_mac(port_priv);
}
-out:
err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, status);
if (err)
dev_err(dev, "Can't clear irq status (err %d)\n", err);
+out:
return IRQ_HANDLED;
}
static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED | DPSW_IRQ_EVENT_ENDPOINT_CHANGED;
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
- u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
struct fsl_mc_device_irq *irq;
int err;
@@ -1775,8 +1774,10 @@ int dpaa2_switch_port_vlans_add(struct net_device *netdev,
/* Make sure that the VLAN is not already configured
* on the switch port
*/
- if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) {
+ netdev_err(netdev, "VLAN %d already configured\n", vlan->vid);
return -EEXIST;
+ }
/* Check if there is space for a new VLAN */
err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
@@ -2003,25 +2004,11 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct ethsw_port_priv *other_port_priv;
- struct net_device *other_dev;
- struct list_head *iter;
bool learn_ena;
int err;
- netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
- if (!dpaa2_switch_port_dev_check(other_dev))
- continue;
-
- other_port_priv = netdev_priv(other_dev);
- if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
- NL_SET_ERR_MSG_MOD(extack,
- "Interface from a different DPSW is in the bridge already");
- return -EINVAL;
- }
- }
-
/* Delete the previously manually installed VLAN 1 */
err = dpaa2_switch_port_del_vlan(port_priv, 1);
if (err)
@@ -2039,6 +2026,11 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
if (err)
goto err_egress_flood;
+ /* Recreate the egress flood domain of the FDB that we just left. */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
+ if (err)
+ goto err_egress_flood;
+
err = switchdev_bridge_port_offload(netdev, netdev, NULL,
NULL, NULL, false, extack);
if (err)
@@ -2155,6 +2147,10 @@ dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_port_priv *other_port_priv;
+ struct net_device *other_dev;
+ struct list_head *iter;
int err;
if (!br_vlan_enabled(upper_dev)) {
@@ -2169,54 +2165,93 @@ dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
return 0;
}
+ netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interface from a different DPSW is in the bridge already");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
-static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int dpaa2_switch_port_prechangeupper(struct net_device *netdev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
- int err = 0;
+ int err;
if (!dpaa2_switch_port_dev_check(netdev))
- return NOTIFY_DONE;
+ return 0;
extack = netdev_notifier_info_to_extack(&info->info);
-
- switch (event) {
- case NETDEV_PRECHANGEUPPER:
- upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev))
- break;
-
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
upper_dev,
extack);
if (err)
- goto out;
+ return err;
if (!info->linking)
dpaa2_switch_port_pre_bridge_leave(netdev);
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_changeupper(struct net_device *netdev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ return dpaa2_switch_port_bridge_join(netdev,
+ upper_dev,
+ extack);
+ else
+ return dpaa2_switch_port_bridge_leave(netdev);
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = dpaa2_switch_port_prechangeupper(netdev, ptr);
+ if (err)
+ return notifier_from_errno(err);
break;
case NETDEV_CHANGEUPPER:
- upper_dev = info->upper_dev;
- if (netif_is_bridge_master(upper_dev)) {
- if (info->linking)
- err = dpaa2_switch_port_bridge_join(netdev,
- upper_dev,
- extack);
- else
- err = dpaa2_switch_port_bridge_leave(netdev);
- }
+ err = dpaa2_switch_port_changeupper(netdev, ptr);
+ if (err)
+ return notifier_from_errno(err);
+
break;
}
-out:
- return notifier_from_errno(err);
+ return NOTIFY_DONE;
}
struct ethsw_switchdev_event_work {
@@ -3294,6 +3329,7 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_FILTER |
NETIF_F_HW_TC;
+ port_netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
err = dpaa2_switch_port_init(port_priv, port_idx);
if (err)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index cffbf27c4656..bfdbdab443ae 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
+MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index e993ed04ab57..f7753ea5b57e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -690,25 +690,26 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
-static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int enetc_get_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int err = 0, i;
/* return hash function */
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
- if (key && hw->port)
+ if (rxfh->key && hw->port)
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
+ ((u32 *)rxfh->key)[i] = enetc_port_rd(hw,
+ ENETC_PRSSK(i));
/* return RSS table */
- if (indir)
- err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
+ if (rxfh->indir)
+ err = enetc_get_rss_table(priv->si, rxfh->indir,
+ priv->si->num_rss);
return err;
}
@@ -722,20 +723,22 @@ void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);
-static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int enetc_set_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int err = 0;
/* set hash key, if PF */
- if (key && hw->port)
- enetc_set_rss_key(hw, key);
+ if (rxfh->key && hw->port)
+ enetc_set_rss_key(hw, rxfh->key);
/* set RSS table */
- if (indir)
- err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
+ if (rxfh->indir)
+ err = enetc_set_rss_table(priv->si, rxfh->indir,
+ priv->si->num_rss);
return err;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c153dc083aff..11b14555802c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -920,6 +920,7 @@ static void enetc_imdio_remove(struct enetc_pf *pf)
static bool enetc_port_has_pcs(struct enetc_pf *pf)
{
return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
}
@@ -1116,6 +1117,8 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv,
pf->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pf->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
pf->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_USXGMII,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e08c7b572497..432523b2c789 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2036,6 +2036,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
/* if any of the above changed restart the FEC */
if (status_change) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
@@ -2045,6 +2046,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
}
} else {
if (fep->link) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_stop(ndev);
@@ -2932,10 +2934,10 @@ static void fec_enet_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
- ethtool_sprintf(&data, "%s", fec_stats[i].name);
+ ethtool_puts(&data, fec_stats[i].name);
}
for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
- ethtool_sprintf(&data, "%s", fec_xdp_stat_strs[i]);
+ ethtool_puts(&data, fec_xdp_stat_strs[i]);
}
page_pool_ethtool_stats_get_strings(data);
@@ -4769,4 +4771,5 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9ba15d3183d7..758535adc9ff 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1073,6 +1073,14 @@ int memac_initialization(struct mac_device *mac_dev,
unsigned long capabilities;
unsigned long *supported;
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * 10GBASE-R (aka XFI), so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
mac_dev->phylink_ops = &memac_mac_ops;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;
@@ -1139,7 +1147,7 @@ int memac_initialization(struct mac_device *mac_dev,
* (and therefore that xfi_pcs cannot be set). If we are defaulting to
* XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
*/
- if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER)
memac->xfi_pcs = pcs;
else
memac->sgmii_pcs = pcs;
@@ -1153,14 +1161,6 @@ int memac_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
- /* The internal connection to the serdes is XGMII, but this isn't
- * really correct for the phy mode (which is the external connection).
- * However, this is how all older device trees say that they want
- * 10GBASE-R (aka XFI), so just convert it for them.
- */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
- mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
-
/* TODO: The following interface modes are supported by (some) hardware
* but not by this driver:
* - 1000BASE-KX
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 70dd982a5edc..026f7270a54d 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
module_platform_driver(fsl_pq_mdio_driver);
+MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 31aa185f4d17..4edd0adfc6c7 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -655,7 +655,7 @@ static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
i);
}
for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
- ethtool_sprintf(&p, txq_stat_names[j]);
+ ethtool_puts(&p, txq_stat_names[j]);
for (i = 0; i < fp->num_xdpqs; i++) {
for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
@@ -663,7 +663,7 @@ static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
xdpq_stat_names[j], i);
}
for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
- ethtool_sprintf(&p, xdpq_stat_names[j]);
+ ethtool_puts(&p, xdpq_stat_names[j]);
for (i = 0; i < netdev->real_num_rx_queues; i++) {
for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
@@ -671,10 +671,10 @@ static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
i);
}
for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
- ethtool_sprintf(&p, rxq_stat_names[j]);
+ ethtool_puts(&p, rxq_stat_names[j]);
for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
- ethtool_sprintf(&p, tls_stat_names[j]);
+ ethtool_puts(&p, tls_stat_names[j]);
break;
default:
break;
@@ -977,44 +977,44 @@ static u32 fun_get_rxfh_key_size(struct net_device *netdev)
return sizeof(fp->rss_key);
}
-static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int fun_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
const struct funeth_priv *fp = netdev_priv(netdev);
if (!fp->rss_cfg)
return -EOPNOTSUPP;
- if (indir)
- memcpy(indir, fp->indir_table,
+ if (rxfh->indir)
+ memcpy(rxfh->indir, fp->indir_table,
sizeof(u32) * fp->indir_table_nentries);
- if (key)
- memcpy(key, fp->rss_key, sizeof(fp->rss_key));
+ if (rxfh->key)
+ memcpy(rxfh->key, fp->rss_key, sizeof(fp->rss_key));
- if (hfunc)
- *hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
- ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
+ rxfh->hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
return 0;
}
-static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int fun_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct funeth_priv *fp = netdev_priv(netdev);
- const u32 *rss_indir = indir ? indir : fp->indir_table;
- const u8 *rss_key = key ? key : fp->rss_key;
+ const u32 *rss_indir = rxfh->indir ? rxfh->indir : fp->indir_table;
+ const u8 *rss_key = rxfh->key ? rxfh->key : fp->rss_key;
enum fun_eth_hash_alg algo;
if (!fp->rss_cfg)
return -EOPNOTSUPP;
- if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+ if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE)
algo = fp->hash_algo;
- else if (hfunc == ETH_RSS_HASH_CRC32)
+ else if (rxfh->hfunc == ETH_RSS_HASH_CRC32)
algo = FUN_ETH_RSS_ALG_CRC32;
- else if (hfunc == ETH_RSS_HASH_TOP)
+ else if (rxfh->hfunc == ETH_RSS_HASH_TOP)
algo = FUN_ETH_RSS_ALG_TOEPLITZ;
else
return -EINVAL;
@@ -1031,10 +1031,10 @@ static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
}
fp->hash_algo = algo;
- if (key)
- memcpy(fp->rss_key, key, sizeof(fp->rss_key));
- if (indir)
- memcpy(fp->indir_table, indir,
+ if (rxfh->key)
+ memcpy(fp->rss_key, rxfh->key, sizeof(fp->rss_key));
+ if (rxfh->indir)
+ memcpy(fp->indir_table, rxfh->indir,
sizeof(u32) * fp->indir_table_nentries);
return 0;
}
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 0d1e681be250..b80349154604 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -8,6 +8,7 @@
#define _GVE_H_
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
@@ -41,12 +42,16 @@
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4
+#define GVE_ADMINQ_BUFFER_SIZE 4096
+
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
-#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+
+#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
@@ -672,6 +677,7 @@ struct gve_priv {
/* Admin queue - see gve_adminq.h*/
union gve_adminq_command *adminq;
dma_addr_t adminq_bus_addr;
+ struct dma_pool *adminq_pool;
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 79db7a6d42bc..12fbd723ecc6 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -194,12 +194,19 @@ gve_process_device_options(struct gve_priv *priv,
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
- priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
- &priv->adminq_bus_addr, GFP_KERNEL);
- if (unlikely(!priv->adminq))
+ priv->adminq_pool = dma_pool_create("adminq_pool", dev,
+ GVE_ADMINQ_BUFFER_SIZE, 0, 0);
+ if (unlikely(!priv->adminq_pool))
return -ENOMEM;
+ priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
+ &priv->adminq_bus_addr);
+ if (unlikely(!priv->adminq)) {
+ dma_pool_destroy(priv->adminq_pool);
+ return -ENOMEM;
+ }
- priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
+ priv->adminq_mask =
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
priv->adminq_prod_cnt = 0;
priv->adminq_cmd_fail = 0;
priv->adminq_timeouts = 0;
@@ -218,9 +225,20 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_get_ptype_map_cnt = 0;
/* Setup Admin queue with the device */
- iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
- &priv->reg_bar0->adminq_pfn);
-
+ if (priv->pdev->revision < 0x1) {
+ iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
+ &priv->reg_bar0->adminq_pfn);
+ } else {
+ iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
+ &priv->reg_bar0->adminq_length);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ iowrite32be(priv->adminq_bus_addr >> 32,
+ &priv->reg_bar0->adminq_base_address_hi);
+#endif
+ iowrite32be(priv->adminq_bus_addr,
+ &priv->reg_bar0->adminq_base_address_lo);
+ iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
+ }
gve_set_admin_queue_ok(priv);
return 0;
}
@@ -230,16 +248,27 @@ void gve_adminq_release(struct gve_priv *priv)
int i = 0;
/* Tell the device the adminq is leaving */
- iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
- while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
- /* If this is reached the device is unrecoverable and still
- * holding memory. Continue looping to avoid memory corruption,
- * but WARN so it is visible what is going on.
- */
- if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
- WARN(1, "Unrecoverable platform error!");
- i++;
- msleep(GVE_ADMINQ_SLEEP_LEN);
+ if (priv->pdev->revision < 0x1) {
+ iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
+ while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
+ /* If this is reached the device is unrecoverable and still
+ * holding memory. Continue looping to avoid memory corruption,
+ * but WARN so it is visible what is going on.
+ */
+ if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
+ WARN(1, "Unrecoverable platform error!");
+ i++;
+ msleep(GVE_ADMINQ_SLEEP_LEN);
+ }
+ } else {
+ iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
+ while (!(ioread32be(&priv->reg_bar0->device_status)
+ & GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
+ if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
+ WARN(1, "Unrecoverable platform error!");
+ i++;
+ msleep(GVE_ADMINQ_SLEEP_LEN);
+ }
}
gve_clear_device_rings_ok(priv);
gve_clear_device_resources_ok(priv);
@@ -251,7 +280,8 @@ void gve_adminq_free(struct device *dev, struct gve_priv *priv)
if (!gve_get_admin_queue_ok(priv))
return;
gve_adminq_release(priv);
- dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
+ dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
+ dma_pool_destroy(priv->adminq_pool);
gve_clear_admin_queue_ok(priv);
}
@@ -697,18 +727,7 @@ static int gve_set_desc_cnt(struct gve_priv *priv,
struct gve_device_descriptor *descriptor)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
- dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
- priv->tx_desc_cnt);
- return -EINVAL;
- }
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
- < PAGE_SIZE) {
- dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
- priv->rx_desc_cnt);
- return -EINVAL;
- }
return 0;
}
@@ -778,8 +797,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
u16 mtu;
memset(&cmd, 0, sizeof(cmd));
- descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
- &descriptor_bus, GFP_KERNEL);
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
+ &descriptor_bus);
if (!descriptor)
return -ENOMEM;
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
@@ -787,7 +806,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
cpu_to_be64(descriptor_bus);
cmd.describe_device.device_descriptor_version =
cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
- cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);
+ cmd.describe_device.available_length =
+ cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);
err = gve_adminq_execute_cmd(priv, &cmd);
if (err)
@@ -868,8 +888,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_op_jumbo_frames, dev_op_dqo_qpl);
free_device_descriptor:
- dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
- descriptor_bus);
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
return err;
}
@@ -898,6 +917,7 @@ int gve_adminq_register_page_list(struct gve_priv *priv,
.page_list_id = cpu_to_be32(qpl->id),
.num_pages = cpu_to_be32(num_entries),
.page_address_list_addr = cpu_to_be64(page_list_bus),
+ .page_size = cpu_to_be64(PAGE_SIZE),
};
err = gve_adminq_execute_cmd(priv, &cmd);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 38a22279e863..5865ccdccbd0 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -219,9 +219,10 @@ struct gve_adminq_register_page_list {
__be32 page_list_id;
__be32 num_pages;
__be64 page_address_list_addr;
+ __be64 page_size;
};
-static_assert(sizeof(struct gve_adminq_register_page_list) == 16);
+static_assert(sizeof(struct gve_adminq_register_page_list) == 24);
struct gve_adminq_unregister_page_list {
__be32 page_list_id;
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 1eb4d5fd8561..c36b93f0de15 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -33,6 +33,9 @@
#define GVE_DEALLOCATE_COMPL_TIMEOUT 60
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
+netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 233e5946905e..e5397aa1e48f 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -519,7 +519,7 @@ static int gve_set_tunable(struct net_device *netdev,
case ETHTOOL_RX_COPYBREAK:
{
u32 max_copybreak = gve_is_gqi(priv) ?
- (PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+ GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
len = *(u32 *)value;
if (len > max_copybreak)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 2d42e733837b..619bf63ec935 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -79,6 +79,18 @@ static int gve_verify_driver_compatibility(struct gve_priv *priv)
return err;
}
+static netdev_features_t gve_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (!gve_is_gqi(priv))
+ return gve_features_check_dqo(skb, dev, features);
+
+ return features;
+}
+
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
@@ -1316,7 +1328,7 @@ static int gve_open(struct net_device *dev)
/* Hard code this for now. This may be tuned in the future for
* performance.
*/
- priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+ priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
}
err = gve_create_rings(priv);
if (err)
@@ -1652,7 +1664,7 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
dev->mtu);
return -EOPNOTSUPP;
@@ -1879,6 +1891,7 @@ err:
static const struct net_device_ops gve_netdev_ops = {
.ndo_start_xmit = gve_start_xmit,
+ .ndo_features_check = gve_features_check,
.ndo_open = gve_open,
.ndo_stop = gve_close,
.ndo_get_stats64 = gve_get_stats,
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index fb655463c357..8e72b97008d6 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -18,11 +18,20 @@ struct gve_registers {
__be32 adminq_event_counter;
u8 reserved[3];
u8 driver_version;
+ __be32 adminq_base_address_hi;
+ __be32 adminq_base_address_lo;
+ __be16 adminq_length;
};
enum gve_device_status_flags {
GVE_DEVICE_STATUS_RESET_MASK = BIT(1),
GVE_DEVICE_STATUS_LINK_STATUS_MASK = BIT(2),
GVE_DEVICE_STATUS_REPORT_STATS_MASK = BIT(3),
+ GVE_DEVICE_STATUS_DEVICE_IS_RESET = BIT(4),
+};
+
+enum gve_driver_status_flags {
+ GVE_DRIVER_STATUS_RUN_MASK = BIT(0),
+ GVE_DRIVER_STATUS_RESET_MASK = BIT(1),
};
#endif /* _GVE_REGISTER_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 73655347902d..76615d47e055 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -211,9 +211,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
- u32 slots, npages;
int filled_pages;
size_t bytes;
+ u32 slots;
int err;
netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
@@ -270,12 +270,6 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
/* alloc rx desc ring */
bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
- npages = bytes / PAGE_SIZE;
- if (npages * PAGE_SIZE != bytes) {
- err = -EIO;
- goto abort_with_q_resources;
- }
-
rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
GFP_KERNEL);
if (!rx->desc.desc_ring) {
@@ -289,7 +283,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
/* Allocating half-page buffers allows page-flipping which is faster
* than copying or allocating new pages.
*/
- rx->packet_buffer_size = PAGE_SIZE / 2;
+ rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
gve_rx_add_to_block(priv, idx);
@@ -362,7 +356,7 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
- u16 packet_buffer_size, u16 len,
+ unsigned int truesize, u16 len,
struct gve_rx_ctx *ctx)
{
u32 offset = page_info->page_offset + page_info->pad;
@@ -395,20 +389,20 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
if (skb != ctx->skb_head) {
ctx->skb_head->len += len;
ctx->skb_head->data_len += len;
- ctx->skb_head->truesize += packet_buffer_size;
+ ctx->skb_head->truesize += truesize;
}
skb_add_rx_frag(skb, num_frags, page_info->page,
- offset, len, packet_buffer_size);
+ offset, len, truesize);
return ctx->skb_head;
}
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
- const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
+ const __be64 offset = cpu_to_be64(GVE_DEFAULT_RX_BUFFER_OFFSET);
/* "flip" to other packet buffer on this page */
- page_info->page_offset ^= PAGE_SIZE / 2;
+ page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
*(slot_addr) ^= offset;
}
@@ -492,7 +486,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
memcpy(alloc_page_info.page_address, src, page_info->pad + len);
skb = gve_rx_add_frags(napi, &alloc_page_info,
- rx->packet_buffer_size,
+ PAGE_SIZE,
len, ctx);
u64_stats_update_begin(&rx->statss);
@@ -513,8 +507,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
return NULL;
gve_dec_pagecnt_bias(copy_page_info);
- copy_page_info->page_offset += rx->packet_buffer_size;
- copy_page_info->page_offset &= (PAGE_SIZE - 1);
+ copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
if (copy_page_info->can_flip) {
/* We have used both halves of this copy page, it
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 9f6ffc4a54f0..07ba124780df 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -819,7 +819,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
}
-#define GVE_TX_START_THRESH PAGE_SIZE
+#define GVE_TX_START_THRESH 4096
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake)
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 1e19b834a613..f59c4710f118 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -843,6 +843,16 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
return true;
}
+netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_is_gso(skb) && !gve_can_send_tso(skb))
+ return features & ~NETIF_F_GSO_MASK;
+
+ return features;
+}
+
/* Attempt to transmit specified SKB.
*
* Returns 0 if the SKB was transmitted or dropped.
@@ -854,11 +864,10 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
int num_buffer_descs;
int total_num_descs;
- if (tx->dqo.qpl) {
- if (skb_is_gso(skb))
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
+ if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto drop;
+ if (tx->dqo.qpl) {
/* We do not need to verify the number of buffers used per
* packet or per segment in case of TSO as with 2K size buffers
* none of the TX packet rules would be violated.
@@ -868,24 +877,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
*/
num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
} else {
- if (skb_is_gso(skb)) {
- /* If TSO doesn't meet HW requirements, attempt to linearize the
- * packet.
- */
- if (unlikely(!gve_can_send_tso(skb) &&
- skb_linearize(skb) < 0)) {
- net_err_ratelimited("%s: Failed to transmit TSO packet\n",
- priv->dev->name);
- goto drop;
- }
-
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
-
- num_buffer_descs = gve_num_buffer_descs_needed(skb);
- } else {
- num_buffer_descs = gve_num_buffer_descs_needed(skb);
-
+ num_buffer_descs = gve_num_buffer_descs_needed(skb);
+ if (!skb_is_gso(skb)) {
if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
if (unlikely(skb_linearize(skb) < 0))
goto drop;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 8f391e2adcc0..bdb7afaabdd0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -678,7 +678,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
return;
for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++)
- ethtool_sprintf(&buff, g_gmac_stats_string[i].desc);
+ ethtool_puts(&buff, g_gmac_stats_string[i].desc);
}
static int hns_gmac_get_sset_count(int stringset)
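The ethtool_sprintf() to ethtool_puts() conversions in these hns hunks are behavior-preserving for constant strings: ethtool_puts() copies the string into the stats-strings buffer and advances the cursor by ETH_GSTRING_LEN, without running the name through a printf-style formatter. A minimal sketch, with hypothetical stat names:

static void foo_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	/* hypothetical constant stat names; a '%' in a name is copied literally */
	static const char * const foo_stats[] = { "rx_packets", "tx_packets" };
	int i;

	for (i = 0; i < ARRAY_SIZE(foo_stats); i++)
		ethtool_puts(&data, foo_stats[i]);	/* advances data by ETH_GSTRING_LEN */
}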
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index fc26ffaae620..c58833eb4830 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -752,7 +752,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data)
return;
for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++)
- ethtool_sprintf(&buff, g_xgmac_stats_string[i].desc);
+ ethtool_puts(&buff, g_xgmac_stats_string[i].desc);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index b54f3706fb97..a5bb306b2cf1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -912,42 +912,41 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
if (stringset == ETH_SS_TEST) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
- ethtool_sprintf(&buff,
- hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
- ethtool_sprintf(&buff,
- hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
+ ethtool_puts(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
+ ethtool_puts(&buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
if ((netdev->phydev) && (!netdev->phydev->is_c45))
- ethtool_sprintf(&buff,
- hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
+ ethtool_puts(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
} else {
- ethtool_sprintf(&buff, "rx_packets");
- ethtool_sprintf(&buff, "tx_packets");
- ethtool_sprintf(&buff, "rx_bytes");
- ethtool_sprintf(&buff, "tx_bytes");
- ethtool_sprintf(&buff, "rx_errors");
- ethtool_sprintf(&buff, "tx_errors");
- ethtool_sprintf(&buff, "rx_dropped");
- ethtool_sprintf(&buff, "tx_dropped");
- ethtool_sprintf(&buff, "multicast");
- ethtool_sprintf(&buff, "collisions");
- ethtool_sprintf(&buff, "rx_over_errors");
- ethtool_sprintf(&buff, "rx_crc_errors");
- ethtool_sprintf(&buff, "rx_frame_errors");
- ethtool_sprintf(&buff, "rx_fifo_errors");
- ethtool_sprintf(&buff, "rx_missed_errors");
- ethtool_sprintf(&buff, "tx_aborted_errors");
- ethtool_sprintf(&buff, "tx_carrier_errors");
- ethtool_sprintf(&buff, "tx_fifo_errors");
- ethtool_sprintf(&buff, "tx_heartbeat_errors");
- ethtool_sprintf(&buff, "rx_length_errors");
- ethtool_sprintf(&buff, "tx_window_errors");
- ethtool_sprintf(&buff, "rx_compressed");
- ethtool_sprintf(&buff, "tx_compressed");
- ethtool_sprintf(&buff, "netdev_rx_dropped");
- ethtool_sprintf(&buff, "netdev_tx_dropped");
-
- ethtool_sprintf(&buff, "netdev_tx_timeout");
+ ethtool_puts(&buff, "rx_packets");
+ ethtool_puts(&buff, "tx_packets");
+ ethtool_puts(&buff, "rx_bytes");
+ ethtool_puts(&buff, "tx_bytes");
+ ethtool_puts(&buff, "rx_errors");
+ ethtool_puts(&buff, "tx_errors");
+ ethtool_puts(&buff, "rx_dropped");
+ ethtool_puts(&buff, "tx_dropped");
+ ethtool_puts(&buff, "multicast");
+ ethtool_puts(&buff, "collisions");
+ ethtool_puts(&buff, "rx_over_errors");
+ ethtool_puts(&buff, "rx_crc_errors");
+ ethtool_puts(&buff, "rx_frame_errors");
+ ethtool_puts(&buff, "rx_fifo_errors");
+ ethtool_puts(&buff, "rx_missed_errors");
+ ethtool_puts(&buff, "tx_aborted_errors");
+ ethtool_puts(&buff, "tx_carrier_errors");
+ ethtool_puts(&buff, "tx_fifo_errors");
+ ethtool_puts(&buff, "tx_heartbeat_errors");
+ ethtool_puts(&buff, "rx_length_errors");
+ ethtool_puts(&buff, "tx_window_errors");
+ ethtool_puts(&buff, "rx_compressed");
+ ethtool_puts(&buff, "tx_compressed");
+ ethtool_puts(&buff, "netdev_rx_dropped");
+ ethtool_puts(&buff, "netdev_tx_dropped");
+
+ ethtool_puts(&buff, "netdev_tx_timeout");
h->dev->ops->get_strings(h, stringset, buff);
}
@@ -1187,7 +1186,7 @@ hns_get_rss_indir_size(struct net_device *netdev)
}
static int
-hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+hns_get_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
@@ -1200,15 +1199,16 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
ops = priv->ae_handle->dev->ops;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- return ops->get_rss(priv->ae_handle, indir, key, hfunc);
+ return ops->get_rss(priv->ae_handle,
+ rxfh->indir, rxfh->key, &rxfh->hfunc);
}
static int
-hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+hns_set_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
@@ -1221,12 +1221,14 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
ops = priv->ae_handle->dev->ops;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
netdev_err(netdev, "Invalid hfunc!\n");
return -EOPNOTSUPP;
}
- return ops->set_rss(priv->ae_handle, indir, key, hfunc);
+ return ops->set_rss(priv->ae_handle,
+ rxfh->indir, rxfh->key, rxfh->hfunc);
}
static int hns_get_rxnfc(struct net_device *netdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b618797a7e8d..f1695c889d3a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1041,7 +1041,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
return;
order = get_order(alloc_size);
- if (order > MAX_ORDER) {
+ if (order > MAX_PAGE_ORDER) {
if (net_ratelimit())
dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
return;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 682239f33082..999a0ee162a6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -941,19 +941,21 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
return ae_dev->dev_specs.rss_ind_tbl_size;
}
-static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int hns3_get_rss(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo->ops->get_rss)
return -EOPNOTSUPP;
- return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
+ return h->ae_algo->ops->get_rss(h, rxfh->indir, rxfh->key,
+ &rxfh->hfunc);
}
-static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int hns3_set_rss(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
@@ -962,19 +964,22 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
return -EOPNOTSUPP;
if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
- hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
- hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
+ rxfh->hfunc != ETH_RSS_HASH_TOP) ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP &&
+ rxfh->hfunc != ETH_RSS_HASH_XOR)) {
netdev_err(netdev, "hash func not supported\n");
return -EOPNOTSUPP;
}
- if (!indir) {
+ if (!rxfh->indir) {
netdev_err(netdev,
"set rss failed for indir is empty\n");
return -EOPNOTSUPP;
}
- return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
+ return h->ae_algo->ops->set_rss(h, rxfh->indir, rxfh->key,
+ rxfh->hfunc);
}
static int hns3_get_rxnfc(struct net_device *netdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index ff3f8f424ad9..9ec471ced3d6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -981,19 +981,24 @@ static const struct hclge_dbg_item tm_pri_items[] = {
static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
{
- char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
char content[HCLGE_DBG_TM_INFO_LEN];
u8 pri_num, sch_mode, weight, i, j;
+ char *data_str;
int pos, ret;
ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
return ret;
+ data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
+ GFP_KERNEL);
+ if (!data_str)
+ return -ENOMEM;
+
for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
- result[i] = &data_str[i][0];
+ result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
NULL, ARRAY_SIZE(tm_pri_items));
@@ -1002,23 +1007,23 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
for (i = 0; i < pri_num; i++) {
ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
if (ret)
- return ret;
+ goto out;
ret = hclge_tm_get_pri_weight(hdev, i, &weight);
if (ret)
- return ret;
+ goto out;
ret = hclge_tm_get_pri_shaper(hdev, i,
HCLGE_OPC_TM_PRI_C_SHAPPING,
&c_shaper_para);
if (ret)
- return ret;
+ goto out;
ret = hclge_tm_get_pri_shaper(hdev, i,
HCLGE_OPC_TM_PRI_P_SHAPPING,
&p_shaper_para);
if (ret)
- return ret;
+ goto out;
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
@@ -1035,7 +1040,9 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
pos += scnprintf(buf + pos, len - pos, "%s", content);
}
- return 0;
+out:
+ kfree(data_str);
+ return ret;
}
static const struct hclge_dbg_item tm_qset_items[] = {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index f4b680286911..0304f03d4093 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1137,7 +1137,7 @@ static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
static int hinic_get_rxfh(struct net_device *netdev,
- u32 *indir, u8 *key, u8 *hfunc)
+ struct ethtool_rxfh_param *rxfh)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
u8 hash_engine_type = 0;
@@ -1146,32 +1146,33 @@ static int hinic_get_rxfh(struct net_device *netdev,
if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- if (hfunc) {
- err = hinic_rss_get_hash_engine(nic_dev,
- nic_dev->rss_tmpl_idx,
- &hash_engine_type);
- if (err)
- return -EFAULT;
+ err = hinic_rss_get_hash_engine(nic_dev,
+ nic_dev->rss_tmpl_idx,
+ &hash_engine_type);
+ if (err)
+ return -EFAULT;
- *hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
- }
+ rxfh->hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
- if (indir) {
+ if (rxfh->indir) {
err = hinic_rss_get_indir_tbl(nic_dev,
- nic_dev->rss_tmpl_idx, indir);
+ nic_dev->rss_tmpl_idx,
+ rxfh->indir);
if (err)
return -EFAULT;
}
- if (key)
+ if (rxfh->key)
err = hinic_rss_get_template_tbl(nic_dev,
- nic_dev->rss_tmpl_idx, key);
+ nic_dev->rss_tmpl_idx,
+ rxfh->key);
return err;
}
-static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int hinic_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
@@ -1179,11 +1180,12 @@ static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
- if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ if (rxfh->hfunc != ETH_RSS_HASH_TOP &&
+ rxfh->hfunc != ETH_RSS_HASH_XOR)
return -EOPNOTSUPP;
- nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
+ nic_dev->rss_hash_engine = (rxfh->hfunc == ETH_RSS_HASH_XOR) ?
HINIC_RSS_HASH_ENGINE_TYPE_XOR :
HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
err = hinic_rss_set_hash_engine
@@ -1193,7 +1195,7 @@ static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
return -EFAULT;
}
- err = __set_rss_rxfh(netdev, indir, key);
+ err = __set_rss_rxfh(netdev, rxfh->indir, rxfh->key);
return err;
}
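The RSS hunks above (hns, hns3, hinic) all follow the same core ethtool API change: .get_rxfh/.set_rxfh no longer take separate indir/key/hfunc arguments but a single struct ethtool_rxfh_param, and .set_rxfh also gains a netlink extack. A minimal sketch of the new callback shape for a hypothetical driver; the foo_priv fields and foo_program_rss() helper are placeholders, not part of any patch shown here.

static int foo_get_rxfh(struct net_device *netdev,
			struct ethtool_rxfh_param *rxfh)
{
	struct foo_priv *priv = netdev_priv(netdev);

	rxfh->hfunc = ETH_RSS_HASH_TOP;
	if (rxfh->indir)
		memcpy(rxfh->indir, priv->reta, sizeof(priv->reta));
	if (rxfh->key)
		memcpy(rxfh->key, priv->rss_key, sizeof(priv->rss_key));
	return 0;
}

static int foo_set_rxfh(struct net_device *netdev,
			struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	/* drivers typically reject unsupported hash functions first */
	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	return foo_program_rss(netdev, rxfh->indir, rxfh->key);
}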
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4e18b4cefa97..94ac36b1408b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -48,7 +48,7 @@
* of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
* some padding.
*
- * But the size of a single DMA region is limited by MAX_ORDER in the
+ * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the
* kernel (about 16MB currently). To support say 4K Jumbo frames, we
* use a set of LTBs (struct ltb_set) per pool.
*
@@ -75,7 +75,7 @@
* pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
* plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
*/
-#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_ORDER) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
#define IBMVNIC_LTB_SET_SIZE (38 << 20)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 06ddd7147c7f..d55638ad8704 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -299,6 +299,17 @@ config ICE
To compile this driver as a module, choose M here. The module
will be called ice.
+config ICE_HWMON
+ bool "Intel(R) Ethernet Connection E800 Series Support HWMON support"
+ default y
+ depends on ICE && HWMON && !(ICE=y && HWMON=m)
+ help
+ Say Y if you want to expose thermal sensor data on Intel devices.
+
+ Some of our devices contain internal thermal sensors.
+ This data is available via the hwmon sysfs interface, which
+ exposes the onboard sensors.
+
config ICE_SWITCHDEV
bool "Switchdev Support"
default y
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4542e2bc28e8..f9328f2e669f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -5,6 +5,7 @@
* Shared functions for accessing and configuring the MAC
*/
+#include <linux/bitfield.h>
#include "e1000.h"
static s32 e1000_check_downshift(struct e1000_hw *hw);
@@ -3260,8 +3261,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->mdix_mode =
- (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
- IGP01E1000_PSSR_MDIX_SHIFT);
+ (e1000_auto_x_mode)FIELD_GET(IGP01E1000_PSSR_MDIX, phy_data);
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -3272,11 +3272,11 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
/* Get cable length */
@@ -3326,14 +3326,12 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->extended_10bt_distance =
- ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
- M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+ FIELD_GET(M88E1000_PSCR_10BT_EXT_DIST_ENABLE, phy_data) ?
e1000_10bt_ext_dist_enable_lower :
e1000_10bt_ext_dist_enable_normal;
phy_info->polarity_correction =
- ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
- M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+ FIELD_GET(M88E1000_PSCR_POLARITY_REVERSAL, phy_data) ?
e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
/* Check polarity status */
@@ -3347,27 +3345,25 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->mdix_mode =
- (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
- M88E1000_PSSR_MDIX_SHIFT);
+ (e1000_auto_x_mode)FIELD_GET(M88E1000_PSSR_MDIX, phy_data);
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
/* Cable Length Estimation and Local/Remote Receiver Information
* are only valid at 1000 Mbps.
*/
phy_info->cable_length =
- (e1000_cable_length) ((phy_data &
- M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+ (e1000_cable_length)FIELD_GET(M88E1000_PSSR_CABLE_LENGTH,
+ phy_data);
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
return ret_val;
- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
}
@@ -3515,7 +3511,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
if (ret_val)
return ret_val;
eeprom_size =
- (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ FIELD_GET(EEPROM_SIZE_MASK, eeprom_size);
/* 256B eeprom size was not supported in earlier hardware, so we
* bump eeprom_size up one to ensure that "1" (which maps to
* 256B) is never the result used in the shifting logic below.
@@ -4891,8 +4887,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
&phy_data);
if (ret_val)
return ret_val;
- cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ cable_length = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
/* Convert the enum value to ranged values */
switch (cable_length) {
@@ -5001,8 +4996,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
&phy_data);
if (ret_val)
return ret_val;
- *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
- M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ *polarity = FIELD_GET(M88E1000_PSSR_REV_POLARITY, phy_data) ?
e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
} else if (hw->phy_type == e1000_phy_igp) {
@@ -5072,8 +5066,8 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
- M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ hw->speed_downgraded = FIELD_GET(M88E1000_PSSR_DOWNSHIFT,
+ phy_data);
}
return E1000_SUCCESS;
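The e1000 conversions above (and the e1000e/fm10k ones that follow) replace open-coded mask-and-shift arithmetic with the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>, which derive the shift from the mask at compile time. A minimal before/after sketch with an illustrative mask (EXAMPLE_SPEED_MASK is not a real register field):

#include <linux/bitfield.h>

/* illustrative field, not a real device register layout */
#define EXAMPLE_SPEED_MASK	GENMASK(15, 14)

static u16 example_get_speed(u16 reg)
{
	/* before: (reg & EXAMPLE_SPEED_MASK) >> 14, with the shift kept in sync by hand */
	return FIELD_GET(EXAMPLE_SPEED_MASK, reg);
}

static u16 example_set_speed(u16 reg, u16 speed)
{
	/* before: (speed << 14) & EXAMPLE_SPEED_MASK */
	return (reg & ~EXAMPLE_SPEED_MASK) | FIELD_PREP(EXAMPLE_SPEED_MASK, speed);
}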
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index be9c695dde12..4eb1ceaf865a 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -92,8 +92,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -1035,17 +1034,18 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
- 0xFFFF);
+ /* these next three accesses were always meant to use page 0x34 using
+ * GG82563_REG(0x34, N) but never did, so we've just corrected the call
+ * to not drop bits
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 4, 0xFFFF);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- &reg_data);
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 9, &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- reg_data);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 9, reg_data);
if (ret_val)
return ret_val;
ret_val =
@@ -1209,8 +1209,8 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
if (ret_val)
return ret_val;
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) |
+ E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1244,8 +1244,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
if (ret_val)
return ret_val;
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b1e890dd583..969f855a79ee 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -157,8 +157,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
fallthrough;
default:
nvm->type = e1000_nvm_eeprom_spi;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 63c3c79380a1..23a58cada43a 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -678,11 +678,8 @@
/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER 0x0E
-#define PCIE_LINK_STATUS 0x12
#define PCI_HEADER_TYPE_MULTIFUNC 0x80
-#define PCIE_LINK_WIDTH_MASK 0x3F0
-#define PCIE_LINK_WIDTH_SHIFT 4
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index a187582d2299..ba9c19e6994c 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
* INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
 * bits to count nanoseconds, leaving the rest for fractional nanoseconds.
+ *
+ * Any given INCVALUE also has an associated maximum adjustment value. This
+ * maximum adjustment value is the largest increase (or decrease) which can be
+ * safely applied without overflowing the INCVALUE. Since INCVALUE has
+ * a maximum range of 24 bits, its largest value is 0xFFFFFF.
+ *
+ * To understand where the maximum value comes from, consider the following
+ * equation:
+ *
+ * new_incval = base_incval + (base_incval * adjustment) / 1billion
+ *
+ * To avoid overflow that means:
+ * max_incval = base_incval + (base_incval * max_adj) / billion
+ *
+ * Re-arranging:
+ * max_adj = floor(((max_incval - base_incval) * 1billion) / base_incval)
*/
#define INCVALUE_96MHZ 125
#define INCVALUE_SHIFT_96MHZ 17
#define INCPERIOD_SHIFT_96MHZ 2
#define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ)
+#define MAX_PPB_96MHZ 23999900 /* 23,999,900 ppb */
#define INCVALUE_25MHZ 40
#define INCVALUE_SHIFT_25MHZ 18
#define INCPERIOD_25MHZ 1
+#define MAX_PPB_25MHZ 599999900 /* 599,999,900 ppb */
#define INCVALUE_24MHZ 125
#define INCVALUE_SHIFT_24MHZ 14
#define INCPERIOD_24MHZ 3
+#define MAX_PPB_24MHZ 999999999 /* 999,999,999 ppb */
#define INCVALUE_38400KHZ 26
#define INCVALUE_SHIFT_38400KHZ 19
#define INCPERIOD_38400KHZ 1
+#define MAX_PPB_38400KHZ 230769100 /* 230,769,100 ppb */
/* Another drawback of scaling the incvalue by a large factor is that the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
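As a check on the derivation added in this header, the 96 MHz case works out from the constants above (a worked example, not text from the patch): base_incval = INCVALUE_96MHZ << INCVALUE_SHIFT_96MHZ = 125 << 17 = 16,384,000 and max_incval = 0xFFFFFF = 16,777,215, so the largest safe adjustment is about 23,999,938 ppb, which the driver rounds down to MAX_PPB_96MHZ (23,999,900).

	/* worked check of the 96 MHz constants above (illustrative only) */
	u64 base_incval = (u64)INCVALUE_96MHZ << INCVALUE_SHIFT_96MHZ;	/* 16,384,000 */
	u64 max_incval = 0xFFFFFF;					/* 24-bit INCVALUE limit */
	u64 max_adj = div64_u64((max_incval - base_incval) * 1000000000ULL,
				base_incval);				/* ~23,999,938 ppb */
	/* the driver uses the slightly more conservative MAX_PPB_96MHZ (23,999,900) */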
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 9835e6a90d56..fc0f98ea6133 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -654,8 +654,8 @@ static void e1000_get_drvinfo(struct net_device *netdev,
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d-%d",
- (adapter->eeprom_vers & 0xF000) >> 12,
- (adapter->eeprom_vers & 0x0FF0) >> 4,
+ FIELD_GET(0xF000, adapter->eeprom_vers),
+ FIELD_GET(0x0FF0, adapter->eeprom_vers),
(adapter->eeprom_vers & 0x000F));
strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
@@ -925,8 +925,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
}
if (mac->type >= e1000_pch_lpt)
- wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
- E1000_FWSM_WLOCK_MAC_SHIFT;
+ wlock_mac = FIELD_GET(E1000_FWSM_WLOCK_MAC_MASK, er32(FWSM));
for (i = 0; i < mac->rar_entry_count; i++) {
if (mac->type >= e1000_pch_lpt) {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 39e9fc601bf5..19e450a5bd31 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1072,13 +1072,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
- ((lat_enc & E1000_LTRV_SCALE_MASK)
- >> E1000_LTRV_SCALE_SHIFT)));
+ FIELD_GET(E1000_LTRV_SCALE_MASK, lat_enc)));
max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
- (1U << (E1000_LTRV_SCALE_FACTOR *
- ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
- >> E1000_LTRV_SCALE_SHIFT)));
+ (1U << (E1000_LTRV_SCALE_FACTOR *
+ FIELD_GET(E1000_LTRV_SCALE_MASK, max_ltr_enc)));
if (lat_enc_d > max_ltr_enc_d)
lat_enc = max_ltr_enc;
@@ -2075,8 +2073,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
u16 phy_data;
u32 strap = er32(STRAP);
- u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
- E1000_STRAP_SMT_FREQ_SHIFT;
+ u32 freq = FIELD_GET(E1000_STRAP_SMT_FREQ_MASK, strap);
s32 ret_val;
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -2562,8 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
(u16)(mac_reg & 0xFFFF));
hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
- (u16)((mac_reg & E1000_RAH_AV)
- >> 16));
+ (u16)((mac_reg & E1000_RAH_AV) >> 16));
}
e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
@@ -3205,7 +3201,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
&nvm_dword);
if (ret_val)
return ret_val;
- sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ sig_byte = FIELD_GET(0xFF00, nvm_dword);
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
E1000_ICH_NVM_SIG_VALUE) {
*bank = 0;
@@ -3218,7 +3214,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
&nvm_dword);
if (ret_val)
return ret_val;
- sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ sig_byte = FIELD_GET(0xFF00, nvm_dword);
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
E1000_ICH_NVM_SIG_VALUE) {
*bank = 1;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 5df7ad93f3d7..d7df2a0ed629 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
+
#include "e1000.h"
/**
@@ -13,21 +15,17 @@
**/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
+ struct pci_dev *pdev = hw->adapter->pdev;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
- struct e1000_adapter *adapter = hw->adapter;
- u16 pcie_link_status, cap_offset;
+ u16 pcie_link_status;
- cap_offset = adapter->pdev->pcie_cap;
- if (!cap_offset) {
+ if (!pci_pcie_cap(pdev)) {
bus->width = e1000_bus_width_unknown;
} else {
- pci_read_config_word(adapter->pdev,
- cap_offset + PCIE_LINK_STATUS,
- &pcie_link_status);
- bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCIE_LINK_WIDTH_MASK) >>
- PCIE_LINK_WIDTH_SHIFT);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &pcie_link_status);
+ bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW,
+ pcie_link_status);
}
mac->ops.set_lan_id(hw);
@@ -52,7 +50,7 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
* for the device regardless of function swap state.
*/
reg = er32(STATUS);
- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+ bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f536c856727c..af5d9d97a0d6 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1788,8 +1788,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
@@ -1868,8 +1867,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
@@ -5031,8 +5029,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
}
}
@@ -6249,7 +6246,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg |= BM_RCTL_MPE;
phy_reg &= ~(BM_RCTL_MO_MASK);
if (mac_reg & E1000_RCTL_MO_3)
- phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+ phy_reg |= (FIELD_GET(E1000_RCTL_MO_3, mac_reg)
<< BM_RCTL_MO_SHIFT);
if (mac_reg & E1000_RCTL_BAM)
phy_reg |= BM_RCTL_BAM;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 08c3d477dd6f..5e329156d1ba 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -154,10 +154,9 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
e_dbg("MDI Read offset error - requested %d, returned %d\n",
- offset,
- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
return -E1000_ERR_PHY;
}
*data = (u16)mdic;
@@ -167,7 +166,6 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
*/
if (hw->mac.type == e1000_pch2lan)
udelay(100);
-
return 0;
}
@@ -218,10 +216,9 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
e_dbg("MDI Write PHY Red Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
e_dbg("MDI Write offset error - requested %d, returned %d\n",
- offset,
- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
return -E1000_ERR_PHY;
}
@@ -463,8 +460,8 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) |
+ E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -536,8 +533,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1793,8 +1789,7 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
return -E1000_ERR_PHY;
@@ -3234,8 +3229,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
- I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+ length = FIELD_GET(I82577_DSTATUS_CABLE_LENGTH, phy_data);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
return -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 02d871bc112a..bbcfd529399b 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch2lan:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+ break;
case e1000_pch_lpt:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+ else
+ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
+ break;
case e1000_pch_spt:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ break;
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
@@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_lnp:
case e1000_pch_ptp:
case e1000_pch_nvp:
- if ((hw->mac.type < e1000_pch_lpt) ||
- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
- adapter->ptp_clock_info.max_adj = 24000000 - 1;
- break;
- }
- fallthrough;
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ else
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
- adapter->ptp_clock_info.max_adj = 600000000 - 1;
+ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 13a05604dcc0..1bc5b6c0b897 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1057,16 +1057,16 @@ static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}
-static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int fm10k_get_rssh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
+ u8 *key = rxfh->key;
int i, err;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- err = fm10k_get_reta(netdev, indir);
+ err = fm10k_get_reta(netdev, rxfh->indir);
if (err || !key)
return err;
@@ -1076,23 +1076,25 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
-static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int fm10k_set_rssh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw;
int i, err;
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- err = fm10k_set_reta(netdev, indir);
- if (err || !key)
+ err = fm10k_set_reta(netdev, rxfh->indir);
+ if (err || !rxfh->key)
return err;
- for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
- u32 rssrk = le32_to_cpu(*(__le32 *)key);
+ for (i = 0; i < FM10K_RSSRK_SIZE; i++, rxfh->key += 4) {
+ u32 rssrk = le32_to_cpu(*(__le32 *)rxfh->key);
if (interface->rssrk[i] == rssrk)
continue;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index af1b0cde3670..98861cc6df7c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */
+#include <linux/bitfield.h>
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -865,8 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
* register is RO from the VF, so the PF must do this even in the
* case of notifying the VF of a new VID via the mailbox.
*/
- txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
- FM10K_TXQCTL_VID_MASK;
+ txqctl = FIELD_PREP(FM10K_TXQCTL_VID_MASK, vf_vid);
txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
FM10K_TXQCTL_VF | vf_idx;
@@ -1575,8 +1575,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
if (func & FM10K_FAULT_FUNC_PF)
fault->func = 0;
else
- fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
- FM10K_FAULT_FUNC_VF_SHIFT);
+ fault->func = 1 + FIELD_GET(FM10K_FAULT_FUNC_VF_MASK, func);
/* record fault type */
fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index dc8ccd378ec9..7fb1961f2921 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */
+#include <linux/bitfield.h>
#include "fm10k_vf.h"
/**
@@ -126,15 +127,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
hw->mac.max_queues = i;
/* fetch default VLAN and ITR scale */
- hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) &
- FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
+ hw->mac.default_vid = FIELD_GET(FM10K_TXQCTL_VID_MASK,
+ fm10k_read_reg(hw, FM10K_TXQCTL(0)));
/* Read the ITR scale from TDLEN. See the definition of
* FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is
* used here.
*/
- hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) &
- FM10K_TDLEN_ITR_SCALE_MASK) >>
- FM10K_TDLEN_ITR_SCALE_SHIFT;
+ hw->mac.itr_scale = FIELD_GET(FM10K_TDLEN_ITR_SCALE_MASK,
+ fm10k_read_reg(hw, FM10K_TDLEN(0)));
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1bf424ac3145..9b701615c7c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -24,6 +24,7 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_NUM_DESCRIPTORS_XL710 8160
#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
@@ -33,11 +34,11 @@
#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
- (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
+ (test_bit(I40E_HW_CAP_RSS_AQ, (pf)->hw.caps) ? 4 : 1)
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_MAX_VF_QUEUES 16
#define i40e_pf_get_max_q_per_tc(pf) \
- (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
+ (test_bit(I40E_HW_CAP_128_QP_RSS, (pf)->hw.caps) ? 128 : 64)
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
@@ -78,7 +79,7 @@
#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */
/* driver state flags */
-enum i40e_state_t {
+enum i40e_state {
__I40E_TESTING,
__I40E_CONFIG_BUSY,
__I40E_CONFIG_DONE,
@@ -126,7 +127,7 @@ enum i40e_state_t {
BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
-enum i40e_vsi_state_t {
+enum i40e_vsi_state {
__I40E_VSI_DOWN,
__I40E_VSI_NEEDS_RESTART,
__I40E_VSI_SYNCING_FILTERS,
@@ -138,6 +139,60 @@ enum i40e_vsi_state_t {
__I40E_VSI_STATE_SIZE__,
};
+enum i40e_pf_flags {
+ I40E_FLAG_MSI_ENA,
+ I40E_FLAG_MSIX_ENA,
+ I40E_FLAG_RSS_ENA,
+ I40E_FLAG_VMDQ_ENA,
+ I40E_FLAG_SRIOV_ENA,
+ I40E_FLAG_DCB_CAPABLE,
+ I40E_FLAG_DCB_ENA,
+ I40E_FLAG_FD_SB_ENA,
+ I40E_FLAG_FD_ATR_ENA,
+ I40E_FLAG_MFP_ENA,
+ I40E_FLAG_HW_ATR_EVICT_ENA,
+ I40E_FLAG_VEB_MODE_ENA,
+ I40E_FLAG_VEB_STATS_ENA,
+ I40E_FLAG_LINK_POLLING_ENA,
+ I40E_FLAG_TRUE_PROMISC_ENA,
+ I40E_FLAG_LEGACY_RX_ENA,
+ I40E_FLAG_PTP_ENA,
+ I40E_FLAG_IWARP_ENA,
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ I40E_FLAG_SOURCE_PRUNING_DIS,
+ I40E_FLAG_TC_MQPRIO_ENA,
+ I40E_FLAG_FD_SB_INACTIVE,
+ I40E_FLAG_FD_SB_TO_CLOUD_FILTER,
+ I40E_FLAG_FW_LLDP_DIS,
+ I40E_FLAG_RS_FEC,
+ I40E_FLAG_BASE_R_FEC,
+ /* TOTAL_PORT_SHUTDOWN_ENA
+ * Allows to physically disable the link on the NIC's port.
+ * If enabled, (after link down request from the OS)
+ * no link, traffic or led activity is possible on that port.
+ *
+ * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA is set, the
+ * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA must be explicitly forced
+ * to true and cannot be disabled by system admin at that time.
+ * The functionalities are exclusive in terms of configuration, but
+ * they also have similar behavior (allowing to disable physical
+ * link of the port), with following differences:
+ * - LINK_DOWN_ON_CLOSE_ENA is configurable at host OS run-time and
+ * is supported by the whole family of 7xx Intel Ethernet Controllers
+ * - TOTAL_PORT_SHUTDOWN_ENA may be enabled only before the OS loads
+ * (in BIOS), and only if the motherboard's BIOS and the NIC's FW support it
+ * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought
+ * down by sending phy_type=0 to NIC's FW
+ * - when TOTAL_PORT_SHUTDOWN_ENA is used, phy_type is not altered,
+ * instead the link is being brought down by clearing
+ * bit (I40E_AQ_PHY_ENABLE_LINK) in abilities field of
+ * i40e_aq_set_phy_config structure
+ */
+ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
+ I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_PF_FLAGS_NBITS, /* must be last */
+};
+
enum i40e_interrupt_policy {
I40E_INTERRUPT_BEST_CASE,
I40E_INTERRUPT_MEDIUM,
@@ -413,9 +468,7 @@ struct i40e_pf {
struct i40e_hw hw;
DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
struct msix_entry *msix_entries;
- bool fc_autoneg_status;
- u16 eeprom_version;
u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
@@ -431,7 +484,6 @@ struct i40e_pf {
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u16 num_alloc_vsi; /* num VSIs this driver supports */
- u8 atr_sample_rate;
bool wol_en;
struct hlist_head fdir_filter_list;
@@ -469,89 +521,16 @@ struct i40e_pf {
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;
- enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
u32 msg_enable;
char int_name[I40E_INT_NAME_STR_LEN];
- u16 adminq_work_limit; /* num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
- u32 hw_features;
-#define I40E_HW_RSS_AQ_CAPABLE BIT(0)
-#define I40E_HW_128_QP_RSS_CAPABLE BIT(1)
-#define I40E_HW_ATR_EVICT_CAPABLE BIT(2)
-#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3)
-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4)
-#define I40E_HW_NO_PCI_LINK_CHECK BIT(5)
-#define I40E_HW_100M_SGMII_CAPABLE BIT(6)
-#define I40E_HW_NO_DCB_SUPPORT BIT(7)
-#define I40E_HW_USE_SET_LLDP_MIB BIT(8)
-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9)
-#define I40E_HW_PTP_L4_CAPABLE BIT(10)
-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11)
-#define I40E_HW_HAVE_CRT_RETIMER BIT(13)
-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14)
-#define I40E_HW_PHY_CONTROLS_LEDS BIT(15)
-#define I40E_HW_STOP_FW_LLDP BIT(16)
-#define I40E_HW_PORT_ID_VALID BIT(17)
-#define I40E_HW_RESTART_AUTONEG BIT(18)
-
- u32 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40E_FLAG_MSI_ENABLED BIT(1)
-#define I40E_FLAG_MSIX_ENABLED BIT(2)
-#define I40E_FLAG_RSS_ENABLED BIT(3)
-#define I40E_FLAG_VMDQ_ENABLED BIT(4)
-#define I40E_FLAG_SRIOV_ENABLED BIT(5)
-#define I40E_FLAG_DCB_CAPABLE BIT(6)
-#define I40E_FLAG_DCB_ENABLED BIT(7)
-#define I40E_FLAG_FD_SB_ENABLED BIT(8)
-#define I40E_FLAG_FD_ATR_ENABLED BIT(9)
-#define I40E_FLAG_MFP_ENABLED BIT(10)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT(12)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT(13)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT(14)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15)
-#define I40E_FLAG_LEGACY_RX BIT(16)
-#define I40E_FLAG_PTP BIT(17)
-#define I40E_FLAG_IWARP_ENABLED BIT(18)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20)
-#define I40E_FLAG_TC_MQPRIO BIT(21)
-#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
-#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
-#define I40E_FLAG_RS_FEC BIT(25)
-#define I40E_FLAG_BASE_R_FEC BIT(26)
-/* TOTAL_PORT_SHUTDOWN
- * Allows to physically disable the link on the NIC's port.
- * If enabled, (after link down request from the OS)
- * no link, traffic or led activity is possible on that port.
- *
- * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set, the
- * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true
- * and cannot be disabled by system admin at that time.
- * The functionalities are exclusive in terms of configuration, but they also
- * have similar behavior (allowing to disable physical link of the port),
- * with following differences:
- * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is
- * supported by whole family of 7xx Intel Ethernet Controllers
- * - TOTAL_PORT_SHUTDOWN may be enabled only before OS loads (in BIOS)
- * only if motherboard's BIOS and NIC's FW has support of it
- * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought down
- * by sending phy_type=0 to NIC's FW
- * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered, instead
- * the link is being brought down by clearing bit (I40E_AQ_PHY_ENABLE_LINK)
- * in abilities field of i40e_aq_set_phy_config structure
- */
-#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
-#define I40E_FLAG_VF_VLAN_PRUNING BIT(28)
-
+ DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS);
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
@@ -559,7 +538,6 @@ struct i40e_pf {
u32 tx_timeout_count;
u32 tx_timeout_recovery_level;
unsigned long tx_timeout_last_recovery;
- u32 tx_sluggish_count;
u32 hw_csum_rx_error;
u32 led_status;
u16 corer_count; /* Core reset count */
@@ -581,17 +559,13 @@ struct i40e_pf {
struct i40e_lump_tracking *irq_pile;
/* switch config info */
- u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
- struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
bool cur_promisc;
- u16 instance; /* A unique number per i40e_pf instance in the system */
-
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -685,9 +659,7 @@ struct i40e_pf {
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
- struct work_struct ptp_pps_work;
struct work_struct ptp_extts0_work;
- struct work_struct ptp_extts1_work;
ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
@@ -695,10 +667,7 @@ struct i40e_pf {
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
- u64 ptp_pps_start;
- u32 pps_delay;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
- struct ptp_pin_desc ptp_pin[3];
unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
@@ -711,7 +680,6 @@ struct i40e_pf {
u32 fd_inv;
u16 phy_led_val;
- u16 override_q_count;
u16 last_sw_conf_flags;
u16 last_sw_conf_valid_flags;
/* List to keep previous DDP profiles to be rolled back in the future */
@@ -1267,7 +1235,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
{
- return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
+ return test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
}
#ifdef CONFIG_I40E_DCB
@@ -1301,7 +1269,7 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf);
int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
-void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
@@ -1321,13 +1289,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
* i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
* @pf: pointer to a pf.
*
- * Check and return value of flag I40E_FLAG_TC_MQPRIO.
+ * Check and return state of flag I40E_FLAG_TC_MQPRIO.
*
- * Return: I40E_FLAG_TC_MQPRIO set state.
+ * Return: true if I40E_FLAG_TC_MQPRIO_ENA is set, false otherwise.
**/
-static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+static inline bool i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
{
- return pf->flags & I40E_FLAG_TC_MQPRIO;
+ return test_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
}
/**
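
Note on the i40e.h hunks above: the open-coded u32 flag words (I40E_FLAG_*) become a
DECLARE_BITMAP() field queried with test_bit()/set_bit(). The sketch below is a minimal
userspace illustration of that pattern; the bitmap helpers are simplified stand-ins for the
kernel's <linux/bitmap.h>/<linux/bitops.h> API, and only two flag names from the hunk are
reproduced (the real enum ends in I40E_PF_FLAGS_NBITS and carries many more bits).

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel bitmap API, for illustration only. */
#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]

static inline void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static inline bool test_bit(unsigned int nr, const unsigned long *map)
{
	return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

/* Two flag indices in the spirit of the new i40e_pf_flags enum. */
enum { I40E_FLAG_FW_LLDP_DIS, I40E_FLAG_TC_MQPRIO_ENA, I40E_PF_FLAGS_NBITS };

struct pf_sketch { DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS); };

int main(void)
{
	struct pf_sketch pf = { { 0 } };

	set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf.flags);
	/* The old "pf->flags & I40E_FLAG_TC_MQPRIO" test becomes test_bit(): */
	printf("mqprio=%d lldp_dis=%d\n",
	       test_bit(I40E_FLAG_TC_MQPRIO_ENA, pf.flags),
	       test_bit(I40E_FLAG_FW_LLDP_DIS, pf.flags));
	return 0;
}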
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9ce6e633cc2f..f73f5930fc58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -9,40 +9,6 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
- * i40e_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void i40e_adminq_init_regs(struct i40e_hw *hw)
-{
- /* set head and tail registers in our local struct */
- if (i40e_is_vf(hw)) {
- hw->aq.asq.tail = I40E_VF_ATQT1;
- hw->aq.asq.head = I40E_VF_ATQH1;
- hw->aq.asq.len = I40E_VF_ATQLEN1;
- hw->aq.asq.bal = I40E_VF_ATQBAL1;
- hw->aq.asq.bah = I40E_VF_ATQBAH1;
- hw->aq.arq.tail = I40E_VF_ARQT1;
- hw->aq.arq.head = I40E_VF_ARQH1;
- hw->aq.arq.len = I40E_VF_ARQLEN1;
- hw->aq.arq.bal = I40E_VF_ARQBAL1;
- hw->aq.arq.bah = I40E_VF_ARQBAH1;
- } else {
- hw->aq.asq.tail = I40E_PF_ATQT;
- hw->aq.asq.head = I40E_PF_ATQH;
- hw->aq.asq.len = I40E_PF_ATQLEN;
- hw->aq.asq.bal = I40E_PF_ATQBAL;
- hw->aq.asq.bah = I40E_PF_ATQBAH;
- hw->aq.arq.tail = I40E_PF_ARQT;
- hw->aq.arq.head = I40E_PF_ARQH;
- hw->aq.arq.len = I40E_PF_ARQLEN;
- hw->aq.arq.bal = I40E_PF_ARQBAL;
- hw->aq.arq.bah = I40E_PF_ARQBAH;
- }
-}
-
-/**
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
@@ -267,17 +233,17 @@ static int i40e_config_asq_regs(struct i40e_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, I40E_PF_ATQH, 0);
+ wr32(hw, I40E_PF_ATQT, 0);
/* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
+ reg = rd32(hw, I40E_PF_ATQBAL);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = -EIO;
@@ -296,20 +262,20 @@ static int i40e_config_arq_regs(struct i40e_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, I40E_PF_ARQH, 0);
+ wr32(hw, I40E_PF_ARQT, 0);
/* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+ wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
+ reg = rd32(hw, I40E_PF_ARQBAL);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = -EIO;
@@ -452,11 +418,11 @@ static int i40e_shutdown_asq(struct i40e_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
+ wr32(hw, I40E_PF_ATQH, 0);
+ wr32(hw, I40E_PF_ATQT, 0);
+ wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, I40E_PF_ATQBAL, 0);
+ wr32(hw, I40E_PF_ATQBAH, 0);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
@@ -486,11 +452,11 @@ static int i40e_shutdown_arq(struct i40e_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
+ wr32(hw, I40E_PF_ARQH, 0);
+ wr32(hw, I40E_PF_ARQT, 0);
+ wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, I40E_PF_ARQBAL, 0);
+ wr32(hw, I40E_PF_ARQBAH, 0);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
@@ -503,44 +469,76 @@ shutdown_arq_out:
}
/**
- * i40e_set_hw_flags - set HW flags
+ * i40e_set_hw_caps - set HW flags
* @hw: pointer to the hardware structure
**/
-static void i40e_set_hw_flags(struct i40e_hw *hw)
+static void i40e_set_hw_caps(struct i40e_hw *hw)
{
- struct i40e_adminq_info *aq = &hw->aq;
-
- hw->flags = 0;
+ bitmap_zero(hw->caps, I40E_HW_CAPS_NBITS);
switch (hw->mac.type) {
case I40E_MAC_XL710:
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
+ set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
/* The ability to RX (not drop) 802.1ad frames */
- hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ set_bit(I40E_HW_CAP_802_1AD, hw->caps);
+ }
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5)) {
+ /* Supported in FW API version higher than 1.4 */
+ set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
+ }
+ if (i40e_is_fw_ver_lt(hw, 4, 33)) {
+ set_bit(I40E_HW_CAP_RESTART_AUTONEG, hw->caps);
+ /* No DCB support for FW < v4.33 */
+ set_bit(I40E_HW_CAP_NO_DCB_SUPPORT, hw->caps);
+ }
+ if (i40e_is_fw_ver_lt(hw, 4, 3)) {
+ /* Disable FW LLDP if FW < v4.3 */
+ set_bit(I40E_HW_CAP_STOP_FW_LLDP, hw->caps);
+ }
+ if (i40e_is_fw_ver_ge(hw, 4, 40)) {
+ /* Use the FW Set LLDP MIB API if FW >= v4.40 */
+ set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
+ }
+ if (i40e_is_fw_ver_ge(hw, 6, 0)) {
+ /* Enable PTP L4 if FW > v6.0 */
+ set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
}
break;
case I40E_MAC_X722:
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+ set_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps);
+ set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
+ set_bit(I40E_HW_CAP_RSS_AQ, hw->caps);
+ set_bit(I40E_HW_CAP_128_QP_RSS, hw->caps);
+ set_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
+ set_bit(I40E_HW_CAP_WB_ON_ITR, hw->caps);
+ set_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, hw->caps);
+ set_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, hw->caps);
+ set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
+ set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
+ set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
+ set_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, hw->caps);
+ set_bit(I40E_HW_CAP_OUTER_UDP_CSUM, hw->caps);
+
+ if (rd32(hw, I40E_GLQF_FDEVICTENA(1)) !=
+ I40E_FDEVICT_PCTYPE_DEFAULT) {
+ hw_warn(hw, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
+ clear_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
+ }
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_GET_LINK_INFO_X722))
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
- hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+ set_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps);
fallthrough;
default:
@@ -548,22 +546,18 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
}
/* Newer versions of firmware require lock when reading the NVM */
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 5))
- hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 8)) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
- hw->flags |= I40E_HW_FLAG_DROP_MODE;
- }
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
+ set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
+
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if (i40e_is_aq_api_ver_ge(hw, 1, 7))
+ set_bit(I40E_HW_CAP_802_1AD, hw->caps);
+
+ if (i40e_is_aq_api_ver_ge(hw, 1, 8))
+ set_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 9))
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+ if (i40e_is_aq_api_ver_ge(hw, 1, 9))
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps);
}
/**
@@ -593,9 +587,6 @@ int i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_exit;
}
- /* Set up register offsets */
- i40e_adminq_init_regs(hw);
-
/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
@@ -633,7 +624,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
/* Some features were introduced in different FW API version
* for different MAC type.
*/
- i40e_set_hw_flags(hw);
+ i40e_set_hw_caps(hw);
/* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
@@ -648,25 +639,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
- if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
- }
- if (hw->mac.type == I40E_MAC_X722 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
- }
-
- /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 7))
- hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
-
- if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR + 1, 0)) {
ret_code = -EIO;
goto init_adminq_free_arq;
}
@@ -723,9 +696,9 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
+ while (rd32(hw, I40E_PF_ATQH) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
@@ -759,7 +732,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+ return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use;
}
@@ -800,7 +773,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
hw->aq.asq_last_status = I40E_AQ_RC_OK;
- val = rd32(hw, hw->aq.asq.head);
+ val = rd32(hw, I40E_PF_ATQH);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
@@ -892,7 +865,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+ wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
@@ -952,7 +925,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = -EIO;
@@ -1106,7 +1079,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = -EALREADY;
@@ -1154,7 +1127,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
+ wr32(hw, I40E_PF_ARQT, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
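
The i40e_adminq.c hunks above replace open-coded "maj > X || (maj == X && min >= Y)"
comparisons with i40e_is_aq_api_ver_ge()/i40e_is_fw_ver_*() helpers defined in a header
not shown in this excerpt. The userspace sketch below re-implements the comparison the
removed static helper performed, so the conversions can be sanity-checked; the struct
mirrors only the version fields and is not the driver's i40e_adminq_info.

#include <stdbool.h>
#include <stdio.h>

struct aq_info {
	unsigned short api_maj_ver, api_min_ver;	/* AdminQ API version */
	unsigned short fw_maj_ver, fw_min_ver;		/* firmware version */
};

/* Same logic as the removed i40e_is_aq_api_ver_ge() shown above. */
static bool is_aq_api_ver_ge(const struct aq_info *aq,
			     unsigned short maj, unsigned short min)
{
	return aq->api_maj_ver > maj ||
	       (aq->api_maj_ver == maj && aq->api_min_ver >= min);
}

/* Mirrors the old "fw_maj_ver < X || (fw_maj_ver == X && fw_min_ver < Y)" tests. */
static bool is_fw_ver_lt(const struct aq_info *aq,
			 unsigned short maj, unsigned short min)
{
	return aq->fw_maj_ver < maj ||
	       (aq->fw_maj_ver == maj && aq->fw_min_ver < min);
}

int main(void)
{
	struct aq_info aq = { .api_maj_ver = 1, .api_min_ver = 7,
			      .fw_maj_ver = 4,  .fw_min_ver = 33 };

	/* API 1.7 >= 1.5, so the GENEVE-offload style checks pass. */
	printf("api >= 1.5: %d\n", is_aq_api_ver_ge(&aq, 1, 5));
	/* FW 4.33 is not < 4.33, so the "FW < v4.33" quirks stay off. */
	printf("fw < 4.33: %d\n", is_fw_ver_lt(&aq, 4, 33));
	return 0;
}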
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 80125bea80a2..ee86d2c53079 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -29,13 +29,6 @@ struct i40e_adminq_ring {
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
};
/* ASQ transaction details */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 18a1c3b6d72c..c8f35d4de271 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -5,6 +5,7 @@
#define _I40E_ADMINQ_CMD_H_
#include <linux/bits.h>
+#include <linux/types.h>
/* This header file defines the i40e Admin Queue commands and is shared between
* i40e Firmware and Software.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d7e24d661724..de6ca6295742 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2021 Intel Corporation. */
#include <linux/avf/virtchnl.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
@@ -195,11 +196,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- I40E_PF_ATQLEN_ATQENABLE_MASK);
- else
+ /* Check if the queue is initialized */
+ if (!hw->aq.asq.count)
return false;
+
+ return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
}
/**
@@ -248,6 +249,7 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
struct i40e_aqc_get_set_rss_lut *cmd_resp =
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
int status;
+ u16 flags;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -260,23 +262,18 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+ vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
if (pf_lut)
- cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
else
- cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);
+ cmd_resp->flags = cpu_to_le16(flags);
status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
return status;
@@ -346,11 +343,9 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
@@ -669,11 +664,11 @@ int i40e_init_shared_code(struct i40e_hw *hw)
hw->phy.get_link_info = true;
/* Determine port number and PF number*/
- port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
- >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ port = FIELD_GET(I40E_PFGEN_PORTNUM_PORT_NUM_MASK,
+ rd32(hw, I40E_PFGEN_PORTNUM));
hw->port = (u8)port;
- ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
- I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ ari = FIELD_GET(I40E_GLPCI_CAPSUP_ARI_EN_MASK,
+ rd32(hw, I40E_GLPCI_CAPSUP));
func_rid = rd32(hw, I40E_PF_FUNC_RID);
if (ari)
hw->pf_id = (u8)(func_rid & 0xff);
@@ -991,9 +986,8 @@ int i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
- I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
- I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = FIELD_GET(I40E_GLGEN_RSTCTL_GRSTDEL_MASK,
+ rd32(hw, I40E_GLGEN_RSTCTL));
/* It can take upto 15 secs for GRST steady state.
* Bump it to 16 secs max to be safe.
@@ -1085,26 +1079,20 @@ void i40e_clear_hw(struct i40e_hw *hw)
/* get number of interrupts, queues, and VFs */
val = rd32(hw, I40E_GLPCI_CNF2);
- num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
- I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
- num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
- I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+ num_pf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_PF_N_MASK, val);
+ num_vf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_VF_N_MASK, val);
val = rd32(hw, I40E_PFLAN_QALLOC);
- base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
- I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
- j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
- I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ base_queue = FIELD_GET(I40E_PFLAN_QALLOC_FIRSTQ_MASK, val);
+ j = FIELD_GET(I40E_PFLAN_QALLOC_LASTQ_MASK, val);
if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
num_queues = (j - base_queue) + 1;
else
num_queues = 0;
val = rd32(hw, I40E_PF_VT_PFALLOC);
- i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
- I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
- j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
- I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ i = FIELD_GET(I40E_PF_VT_PFALLOC_FIRSTVF_MASK, val);
+ j = FIELD_GET(I40E_PF_VT_PFALLOC_LASTVF_MASK, val);
if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
num_vfs = (j - i) + 1;
else
@@ -1199,8 +1187,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
!hw->func_caps.led[idx])
return 0;
gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
- I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ port = FIELD_GET(I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK, gpio_val);
/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
* if it is not our port then ignore
@@ -1244,8 +1231,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
- mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
- I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ mode = FIELD_GET(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK, gpio_val);
break;
}
@@ -1288,14 +1274,14 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
pin_func = I40E_PIN_FUNC_LED;
gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
- gpio_val |= ((pin_func <<
- I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
- I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+ gpio_val |=
+ FIELD_PREP(I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK,
+ pin_func);
}
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
- gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
- I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+ gpio_val |= FIELD_PREP(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK,
+ mode);
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
@@ -1374,8 +1360,8 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (report_init) {
if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
+ I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
} else {
hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
@@ -1645,12 +1631,11 @@ int i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->lse_enable = false;
- if ((hw->mac.type == I40E_MAC_XL710) &&
- (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
- hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) &&
+ hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) &&
hw->mac.type != I40E_MAC_X722) {
__le32 tmp;
@@ -1750,21 +1735,6 @@ int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
}
/**
- * i40e_is_aq_api_ver_ge
- * @aq: pointer to AdminQ info containing HW API version to compare
- * @maj: API major value
- * @min: API minor value
- *
- * Assert whether current HW API version is greater/equal than provided.
- **/
-static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
- u16 min)
-{
- return (aq->api_maj_ver > maj ||
- (aq->api_maj_ver == maj && aq->api_min_ver >= min));
-}
-
-/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -1890,14 +1860,14 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
cmd->valid_flags |=
cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
@@ -2000,13 +1970,13 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
cmd->valid_flags |=
cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
@@ -2253,7 +2223,7 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw,
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
scfg->mode = mode;
- if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) {
scfg->switch_tag = cpu_to_le16(hw->switch_tag);
scfg->first_tag = cpu_to_le16(hw->first_tag);
scfg->second_tag = cpu_to_le16(hw->second_tag);
@@ -3530,8 +3500,7 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
- cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
desc.datalen = cpu_to_le16(buff_size);
@@ -3637,7 +3606,7 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
(struct i40e_aqc_lldp_restore *)&desc.params.raw;
int status;
- if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
i40e_debug(hw, I40E_DEBUG_ALL,
"Restore LLDP not supported by current FW version.\n");
return -ENODEV;
@@ -3680,7 +3649,7 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
if (persist) {
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
else
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3713,7 +3682,7 @@ int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
cmd->command = I40E_AQ_LLDP_AGENT_START;
if (persist) {
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
else
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3741,7 +3710,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
int status;
- if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
return -ENODEV;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -4212,8 +4181,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
val = rd32(hw, I40E_GLHMC_FCOEFMAX);
- fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
- >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ fcoe_fmax = FIELD_GET(I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK, val);
if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
return -EINVAL;
@@ -4249,30 +4217,25 @@ int i40e_set_filter_control(struct i40e_hw *hw,
/* Program required PE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
- val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PEHSIZE_MASK, settings->pe_filt_num);
/* Program required PE contexts for the PF */
val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
- val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PEDSIZE_MASK, settings->pe_cntx_num);
/* Program required FCoE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
- val |= ((u32)settings->fcoe_filt_num <<
- I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCHSIZE_MASK,
+ settings->fcoe_filt_num);
/* Program required FCoE DDP contexts for the PF */
val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
- val |= ((u32)settings->fcoe_cntx_num <<
- I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCDSIZE_MASK,
+ settings->fcoe_cntx_num);
/* Program Hash LUT size for the PF */
val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
hash_lut_size = 1;
- val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
- I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_HASHLUTSIZE_MASK, hash_lut_size);
/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
if (settings->enable_fdir)
@@ -4673,8 +4636,7 @@ int i40e_read_phy_register_clause22(struct i40e_hw *hw,
"PHY: Can't write command to external PHY.\n");
} else {
command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
- *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
- I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
}
return status;
@@ -4783,8 +4745,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
if (!status) {
command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
- *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
- I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
} else {
i40e_debug(hw, I40E_DEBUG_PHY,
"PHY: Can't read register value from external PHY.\n");
@@ -5043,7 +5004,7 @@ static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
u32 i;
*reg_val = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5076,7 +5037,7 @@ static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
int status;
u32 i;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5115,7 +5076,7 @@ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u8 port_num;
u32 i;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5238,14 +5199,14 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
**/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
- bool use_register;
+ bool use_register = false;
int status = 0;
int retry = 5;
u32 val = 0;
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
+ if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
+ use_register = true;
+
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -5300,13 +5261,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
**/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
- bool use_register;
+ bool use_register = false;
int status = 0;
int retry = 5;
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
+ if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
+ use_register = true;
+
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -5334,16 +5295,17 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
u8 mdio_num,
struct i40e_aqc_phy_register_access *cmd)
{
- if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
- cmd->cmd_flags |=
- I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
- ((mdio_num <<
- I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
- I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
- else
- i40e_debug(hw, I40E_DEBUG_PHY,
- "MDIO I/F number selection not supported by current FW version.\n");
+ if (!set_mdio ||
+ cmd->phy_interface != I40E_AQ_PHY_REG_ACCESS_EXTERNAL)
+ return;
+
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps)) {
+ cmd->cmd_flags |=
+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+ FIELD_PREP(I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK,
+ mdio_num);
+ } else {
+ i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n");
}
}
@@ -5928,9 +5890,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(filters[i].element.flags) &
- I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
- I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ tnl_type = le16_get_bits(filters[i].element.flags,
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
/* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in
@@ -6022,9 +5983,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(filters[i].element.flags) &
- I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
- I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ tnl_type = le16_get_bits(filters[i].element.flags,
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
/* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in
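
The i40e_common.c hunks above convert open-coded "(reg & MASK) >> SHIFT" reads and
"(val << SHIFT) & MASK" writes to FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>
(and le16_get_bits() for little-endian on-wire fields). The stand-in macros below
reproduce the arithmetic for contiguous masks so the equivalence can be checked in
userspace; they omit the kernel's compile-time mask checking, and the two mask values
are invented for the example rather than taken from the register definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins: (mask & -mask) isolates the mask's lowest set bit,
 * i.e. 1 << SHIFT for a contiguous mask, so divide/multiply mimic the shifts. */
#define FIELD_GET(mask, reg)	(((reg) & (mask)) / ((mask) & -(mask)))
#define FIELD_PREP(mask, val)	(((val) * ((mask) & -(mask))) & (mask))

#define PORT_NUM_MASK	0x00000003u	/* example: a 2-bit field at bit 0 */
#define LED_MODE_MASK	0x0007c000u	/* example: a 5-bit field at bit 14 */

int main(void)
{
	uint32_t reg = 0x00008002u;

	/* Old style: (reg & MASK) >> SHIFT; new style: FIELD_GET(MASK, reg). */
	printf("port=%u led_mode=%u\n",
	       (unsigned)FIELD_GET(PORT_NUM_MASK, reg),
	       (unsigned)FIELD_GET(LED_MODE_MASK, reg));

	/* Old style: (val << SHIFT) & MASK; new style: FIELD_PREP(MASK, val),
	 * after clearing the field, as the hunks above do. */
	reg = (reg & ~LED_MODE_MASK) | FIELD_PREP(LED_MODE_MASK, 5);
	printf("reg=0x%08x\n", reg);
	return 0;
}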
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 68602fc375f6..8db1eb0c1768 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */
+#include <linux/bitfield.h>
+#include "i40e_adminq.h"
#include "i40e_alloc.h"
#include "i40e_dcb.h"
#include "i40e_prototype.h"
@@ -20,8 +22,7 @@ int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
return -EINVAL;
reg = rd32(hw, I40E_PRTDCB_GENS);
- *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
- I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+ *status = FIELD_GET(I40E_PRTDCB_GENS_DCBX_STATUS_MASK, reg);
return 0;
}
@@ -50,12 +51,9 @@ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
* |1bit | 1bit|3 bits|3bits|
*/
etscfg = &dcbcfg->etscfg;
- etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
- I40E_IEEE_ETS_WILLING_SHIFT);
- etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
- I40E_IEEE_ETS_CBS_SHIFT);
- etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
- I40E_IEEE_ETS_MAXTC_SHIFT);
+ etscfg->willing = FIELD_GET(I40E_IEEE_ETS_WILLING_MASK, buf[offset]);
+ etscfg->cbs = FIELD_GET(I40E_IEEE_ETS_CBS_MASK, buf[offset]);
+ etscfg->maxtcs = FIELD_GET(I40E_IEEE_ETS_MAXTC_MASK, buf[offset]);
/* Move offset to Priority Assignment Table */
offset++;
@@ -69,11 +67,9 @@ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
- I40E_IEEE_ETS_PRIO_1_SHIFT);
- etscfg->prioritytable[i * 2] = priority;
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
- I40E_IEEE_ETS_PRIO_0_SHIFT);
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_1_MASK, buf[offset]);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_0_MASK, buf[offset]);
etscfg->prioritytable[i * 2 + 1] = priority;
offset++;
}
@@ -124,12 +120,10 @@ static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
- I40E_IEEE_ETS_PRIO_1_SHIFT);
- dcbcfg->etsrec.prioritytable[i*2] = priority;
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
- I40E_IEEE_ETS_PRIO_0_SHIFT);
- dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_1_MASK, buf[offset]);
+ dcbcfg->etsrec.prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_0_MASK, buf[offset]);
+ dcbcfg->etsrec.prioritytable[(i * 2) + 1] = priority;
offset++;
}
@@ -170,12 +164,9 @@ static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
* |1bit | 1bit|2 bits|4bits| 1 octet |
*/
- dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
- I40E_IEEE_PFC_WILLING_SHIFT);
- dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
- I40E_IEEE_PFC_MBC_SHIFT);
- dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
- I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.willing = FIELD_GET(I40E_IEEE_PFC_WILLING_MASK, buf[0]);
+ dcbcfg->pfc.mbc = FIELD_GET(I40E_IEEE_PFC_MBC_MASK, buf[0]);
+ dcbcfg->pfc.pfccap = FIELD_GET(I40E_IEEE_PFC_CAP_MASK, buf[0]);
dcbcfg->pfc.pfcenable = buf[1];
}
@@ -196,8 +187,7 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
u8 *buf;
typelength = ntohs(tlv->typelength);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
buf = tlv->tlvinfo;
/* The App priority table starts 5 octets after TLV header */
@@ -215,12 +205,10 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
while (offset < length) {
- dcbcfg->app[i].priority = (u8)((buf[offset] &
- I40E_IEEE_APP_PRIO_MASK) >>
- I40E_IEEE_APP_PRIO_SHIFT);
- dcbcfg->app[i].selector = (u8)((buf[offset] &
- I40E_IEEE_APP_SEL_MASK) >>
- I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].priority = FIELD_GET(I40E_IEEE_APP_PRIO_MASK,
+ buf[offset]);
+ dcbcfg->app[i].selector = FIELD_GET(I40E_IEEE_APP_SEL_MASK,
+ buf[offset]);
dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
buf[offset + 2];
/* Move to next app */
@@ -248,8 +236,7 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
u8 subtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
- I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ subtype = FIELD_GET(I40E_LLDP_TLV_SUBTYPE_MASK, ouisubtype);
switch (subtype) {
case I40E_IEEE_SUBTYPE_ETS_CFG:
i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
@@ -299,11 +286,9 @@ static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
- etscfg->prioritytable[i * 2] = priority;
- priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
+ priority = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK, buf[offset]);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK, buf[offset]);
etscfg->prioritytable[i * 2 + 1] = priority;
offset++;
}
@@ -360,8 +345,7 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
u8 i;
typelength = ntohs(tlv->hdr.typelen);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
dcbcfg->numapps = length / sizeof(*app);
@@ -417,15 +401,13 @@ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
u32 ouisubtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
- I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ subtype = FIELD_GET(I40E_LLDP_TLV_SUBTYPE_MASK, ouisubtype);
/* Return if not CEE DCBX */
if (subtype != I40E_CEE_DCBX_TYPE)
return;
typelength = ntohs(tlv->typelength);
- tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ tlvlen = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
sizeof(struct i40e_cee_ctrl_tlv);
/* Return if no CEE DCBX Feature TLVs */
@@ -435,11 +417,8 @@ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
typelength = ntohs(sub_tlv->hdr.typelen);
- sublen = (u16)((typelength &
- I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
- subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
- I40E_LLDP_TLV_TYPE_SHIFT);
+ sublen = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
+ subtype = FIELD_GET(I40E_LLDP_TLV_TYPE_MASK, typelength);
switch (subtype) {
case I40E_CEE_SUBTYPE_PG_CFG:
i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
@@ -476,8 +455,7 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
u32 oui;
ouisubtype = ntohl(tlv->ouisubtype);
- oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
- I40E_LLDP_TLV_OUI_SHIFT);
+ oui = FIELD_GET(I40E_LLDP_TLV_OUI_MASK, ouisubtype);
switch (oui) {
case I40E_IEEE_8021QAZ_OUI:
i40e_parse_ieee_tlv(tlv, dcbcfg);
@@ -515,10 +493,8 @@ int i40e_lldp_to_dcb_config(u8 *lldpmib,
tlv = (struct i40e_lldp_org_tlv *)lldpmib;
while (1) {
typelength = ntohs(tlv->typelength);
- type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
- I40E_LLDP_TLV_TYPE_SHIFT);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ type = FIELD_GET(I40E_LLDP_TLV_TYPE_MASK, typelength);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
offset += sizeof(typelength) + length;
/* END TLV or beyond LLDPDU size */
@@ -592,7 +568,7 @@ static void i40e_cee_to_dcb_v1_config(
{
u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, tc, err;
+ u8 i, err;
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -601,13 +577,13 @@ static void i40e_cee_to_dcb_v1_config(
* from those in the CEE Priority Group sub-TLV.
*/
for (i = 0; i < 4; i++) {
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
- dcbcfg->etscfg.prioritytable[i * 2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
+ u8 tc;
+
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK,
+ cee_cfg->oper_prio_tc[i]);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
}
@@ -629,8 +605,7 @@ static void i40e_cee_to_dcb_v1_config(
dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
- status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
- I40E_AQC_CEE_APP_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_APP_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
/* Add APPs if Error is False */
if (!err) {
@@ -639,22 +614,19 @@ static void i40e_cee_to_dcb_v1_config(
/* FCoE APP */
dcbcfg->app[0].priority =
- (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
- I40E_AQC_CEE_APP_FCOE_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FCOE_MASK, app_prio);
dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
/* iSCSI APP */
dcbcfg->app[1].priority =
- (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
- I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_ISCSI_MASK, app_prio);
dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
/* FIP APP */
dcbcfg->app[2].priority =
- (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
- I40E_AQC_CEE_APP_FIP_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FIP_MASK, app_prio);
dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
}
@@ -673,7 +645,7 @@ static void i40e_cee_to_dcb_config(
{
u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, tc, err, sync, oper;
+ u8 i, err, sync, oper;
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -682,13 +654,13 @@ static void i40e_cee_to_dcb_config(
* from those in the CEE Priority Group sub-TLV.
*/
for (i = 0; i < 4; i++) {
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
- dcbcfg->etscfg.prioritytable[i * 2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
+ u8 tc;
+
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK,
+ cee_cfg->oper_prio_tc[i]);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
}
@@ -711,8 +683,7 @@ static void i40e_cee_to_dcb_config(
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
i = 0;
- status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
- I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_FCOE_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -720,15 +691,13 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* FCoE APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
- I40E_AQC_CEE_APP_FCOE_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FCOE_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
i++;
}
- status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
- I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_ISCSI_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -736,15 +705,13 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* iSCSI APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
- I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_ISCSI_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
i++;
}
- status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
- I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_FIP_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -752,8 +719,7 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* FIP APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
- I40E_AQC_CEE_APP_FIP_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FIP_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
i++;
@@ -804,14 +770,11 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
int ret = 0;
/* If Firmware version < v4.33 on X710/XL710, IEEE only */
- if ((hw->mac.type == I40E_MAC_XL710) &&
- (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)))
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 33))
return i40e_get_ieee_dcb_config(hw);
/* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
- if ((hw->mac.type == I40E_MAC_XL710) &&
- ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_eq(hw, 4, 33)) {
ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
sizeof(cee_v1_cfg), NULL);
if (!ret) {
@@ -877,7 +840,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
return -EOPNOTSUPP;
/* Read LLDP NVM area */
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
u8 offset = 0;
if (hw->mac.type == I40E_MAC_XL710)
@@ -1189,7 +1152,7 @@ static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
selector = dcbcfg->app[i].selector & 0x7;
buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
- buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
/* Move to next app */
offset += 3;
i++;
@@ -1285,8 +1248,7 @@ int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
do {
i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
typelength = ntohs(tlv->typelength);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
if (length)
offset += length + I40E_IEEE_TLV_HEADER_LENGTH;
/* END TLV or beyond LLDPDU size */
@@ -1321,20 +1283,16 @@ void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
u32 reg = rd32(hw, I40E_PRTDCB_RETSC);
reg &= ~I40E_PRTDCB_RETSC_ETS_MODE_MASK;
- reg |= ((u32)ets_mode << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) &
- I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_ETS_MODE_MASK, ets_mode);
reg &= ~I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
- reg |= ((u32)non_ets_mode << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) &
- I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK, non_ets_mode);
reg &= ~I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
- reg |= (max_exponent << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) &
- I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK, max_exponent);
reg &= ~I40E_PRTDCB_RETSC_LLTC_MASK;
- reg |= (lltc_map << I40E_PRTDCB_RETSC_LLTC_SHIFT) &
- I40E_PRTDCB_RETSC_LLTC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_LLTC_MASK, lltc_map);
wr32(hw, I40E_PRTDCB_RETSC, reg);
}
@@ -1389,14 +1347,12 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
*/
reg = rd32(hw, I40E_PRT_SWR_PM_THR);
reg &= ~I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
- reg |= (threshold << I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT) &
- I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ reg |= FIELD_PREP(I40E_PRT_SWR_PM_THR_THRESHOLD_MASK, threshold);
wr32(hw, I40E_PRT_SWR_PM_THR, reg);
reg = rd32(hw, I40E_PRTDCB_RPPMC);
reg &= ~I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
- reg |= (fifo_size << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) &
- I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK, fifo_size);
wr32(hw, I40E_PRTDCB_RPPMC, reg);
}
@@ -1438,19 +1394,17 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
reg &= ~I40E_PRTDCB_MFLCN_RPFCE_MASK;
if (pfc_en) {
- reg |= BIT(I40E_PRTDCB_MFLCN_RPFCM_SHIFT) &
- I40E_PRTDCB_MFLCN_RPFCM_MASK;
- reg |= ((u32)pfc_en << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) &
- I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_MFLCN_RPFCM_MASK, 1);
+ reg |= FIELD_PREP(I40E_PRTDCB_MFLCN_RPFCE_MASK,
+ pfc_en);
}
wr32(hw, I40E_PRTDCB_MFLCN, reg);
reg = rd32(hw, I40E_PRTDCB_FCCFG);
reg &= ~I40E_PRTDCB_FCCFG_TFCE_MASK;
if (pfc_en)
- reg |= (I40E_DCB_PFC_ENABLED <<
- I40E_PRTDCB_FCCFG_TFCE_SHIFT) &
- I40E_PRTDCB_FCCFG_TFCE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_FCCFG_TFCE_MASK,
+ I40E_DCB_PFC_ENABLED);
wr32(hw, I40E_PRTDCB_FCCFG, reg);
/* FCTTV and FCRTV to be set by default */
@@ -1468,25 +1422,22 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE);
reg &= ~I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
- reg |= ((u32)pfc_en <<
- I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK,
+ pfc_en);
wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, reg);
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE);
reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
- reg |= ((u32)pfc_en <<
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK,
+ pfc_en);
wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, reg);
for (i = 0; i < I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX; i++) {
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i));
reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
if (pfc_en) {
- reg |= ((u32)refresh_time <<
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK,
+ refresh_time);
}
wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i), reg);
}
@@ -1498,14 +1449,12 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg = rd32(hw, I40E_PRTDCB_TC2PFC);
reg &= ~I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
- reg |= ((u32)tc2pfc << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) &
- I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_TC2PFC_TC2PFC_MASK, tc2pfc);
wr32(hw, I40E_PRTDCB_TC2PFC, reg);
reg = rd32(hw, I40E_PRTDCB_RUP);
reg &= ~I40E_PRTDCB_RUP_NOVLANUP_MASK;
- reg |= ((u32)first_pfc_prio << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) &
- I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RUP_NOVLANUP_MASK, first_pfc_prio);
wr32(hw, I40E_PRTDCB_RUP, reg);
reg = rd32(hw, I40E_PRTDCB_TDPMC);
@@ -1537,8 +1486,7 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
u32 reg = rd32(hw, I40E_PRTDCB_GENC);
reg &= ~I40E_PRTDCB_GENC_NUMTC_MASK;
- reg |= ((u32)num_tc << I40E_PRTDCB_GENC_NUMTC_SHIFT) &
- I40E_PRTDCB_GENC_NUMTC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_GENC_NUMTC_MASK, num_tc);
wr32(hw, I40E_PRTDCB_GENC, reg);
}
@@ -1552,8 +1500,7 @@ u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
{
u32 reg = rd32(hw, I40E_PRTDCB_GENC);
- return (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >>
- I40E_PRTDCB_GENC_NUMTC_SHIFT);
+ return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
}
/**
@@ -1576,13 +1523,13 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
- I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
- reg |= ((u32)bw_share[i] << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
- I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
- reg |= ((u32)mode[i] << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
- I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
- reg |= ((u32)prio_type[i] << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
- I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK,
+ bw_share[i]);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK,
+ mode[i]);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_ETSTC_MASK,
+ prio_type[i]);
wr32(hw, I40E_PRTDCB_RETSTCC(i), reg);
}
}
@@ -1722,8 +1669,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SLW);
reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
- reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
- I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLW_SLW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SLW, reg);
}
@@ -1736,8 +1682,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SLT(i));
reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
- I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLT_SLT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SLT(i), reg);
}
@@ -1746,8 +1692,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_DLW(i));
reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
- I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DLW_DLW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DLW(i), reg);
}
}
@@ -1758,8 +1704,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SHW);
reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
- reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
- I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHW_SHW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SHW, reg);
}
@@ -1772,8 +1717,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SHT(i));
reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
- I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHT_SHT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SHT(i), reg);
}
@@ -1782,8 +1727,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_DHW(i));
reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
- I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DHW_DHW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DHW(i), reg);
}
}
@@ -1793,8 +1738,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
new_val = new_pb_cfg->tc_pool_size[i];
reg = rd32(hw, I40E_PRTRPB_DPS(i));
reg &= ~I40E_PRTRPB_DPS_DPS_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) &
- I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DPS_DPS_TCN_MASK, new_val);
wr32(hw, I40E_PRTRPB_DPS(i), reg);
}
@@ -1802,8 +1746,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
new_val = new_pb_cfg->shared_pool_size;
reg = rd32(hw, I40E_PRTRPB_SPS);
reg &= ~I40E_PRTRPB_SPS_SPS_MASK;
- reg |= (new_val << I40E_PRTRPB_SPS_SPS_SHIFT) &
- I40E_PRTRPB_SPS_SPS_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SPS_SPS_MASK, new_val);
wr32(hw, I40E_PRTRPB_SPS, reg);
/* Program the shared pool low water mark per port if increasing */
@@ -1812,8 +1755,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SLW);
reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
- reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
- I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLW_SLW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SLW, reg);
}
@@ -1826,8 +1768,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SLT(i));
reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
- I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLT_SLT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SLT(i), reg);
}
@@ -1836,8 +1778,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_DLW(i));
reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
- I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DLW_DLW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DLW(i), reg);
}
}
@@ -1848,8 +1790,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SHW);
reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
- reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
- I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHW_SHW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SHW, reg);
}
@@ -1862,8 +1803,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SHT(i));
reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
- I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHT_SHT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SHT(i), reg);
}
@@ -1872,8 +1813,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_DHW(i));
reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
- I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DHW_DHW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DHW(i), reg);
}
}
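
The hunks above replace open-coded shift-and-mask register updates with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. A minimal sketch of the equivalence (illustrative only; the mask name below is made up, not an i40e register definition, and a kernel build context is assumed):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 8-bit field occupying bits 23:16 of a 32-bit register. */
#define EXAMPLE_FIELD_MASK	GENMASK(23, 16)

static u32 example_pack(u32 reg, u32 val)
{
	/* Old style: reg |= (val << 16) & EXAMPLE_FIELD_MASK; */
	reg &= ~EXAMPLE_FIELD_MASK;
	/* FIELD_PREP() derives the shift from the mask at compile time. */
	reg |= FIELD_PREP(EXAMPLE_FIELD_MASK, val);
	return reg;
}

static u32 example_unpack(u32 reg)
{
	/* FIELD_GET() replaces "(reg & MASK) >> SHIFT". */
	return FIELD_GET(EXAMPLE_FIELD_MASK, reg);
}
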
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 6b60dc9b7736..d76497566e40 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -43,7 +43,7 @@
#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
#define I40E_LLDP_TLV_OUI_SHIFT 8
-#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT)
/* Defines for IEEE ETS TLV */
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
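
A note on the 'U' suffix added to I40E_LLDP_TLV_OUI_MASK above: without it, 0xFFFFFF is a signed int and shifting it left by 8 overflows INT_MAX, which is undefined behaviour in C; with the suffix the shift is performed on an unsigned value and yields the intended 0xFFFFFF00 mask. A standalone illustration (not i40e code):

#include <stdint.h>

#define OUI_SHIFT	8
#define OUI_MASK	(0xFFFFFFU << OUI_SHIFT)	/* 0xFFFFFF00, well defined */

static inline uint32_t oui_from_tlv(uint32_t tlv_word)
{
	return (tlv_word & OUI_MASK) >> OUI_SHIFT;
}
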
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 077a95dad32c..b96a92187ab3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -21,8 +21,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
- I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+ *delay = FIELD_GET(I40E_PRTDCB_GENC_PFCLDA_MASK, val);
}
/**
@@ -310,8 +309,8 @@ static u8 i40e_dcbnl_getstate(struct net_device *netdev)
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
dev_dbg(&pf->pdev->dev, "DCB state=%d\n",
- !!(pf->flags & I40E_FLAG_DCB_ENABLED));
- return !!(pf->flags & I40E_FLAG_DCB_ENABLED);
+ test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0);
+ return test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0;
}
/**
@@ -331,19 +330,19 @@ static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state)
return ret;
dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n",
- state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0);
+ state, test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0);
/* Nothing to do */
- if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!state == !test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return ret;
if (i40e_is_sw_dcb(pf)) {
if (state) {
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
memcpy(&pf->hw.desired_dcbx_config,
&pf->hw.local_dcbx_config,
sizeof(struct i40e_dcbx_config));
} else {
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
}
} else {
/* Cannot directly manipulate FW LLDP Agent */
@@ -653,7 +652,7 @@ static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
{
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return I40E_DCBNL_STATUS_ERROR;
switch (capid) {
@@ -693,7 +692,7 @@ static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return -EINVAL;
*num = I40E_MAX_TRAFFIC_CLASS;
@@ -827,15 +826,12 @@ static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
u8 *perm_addr)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
- int i, j;
+ int i;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
for (i = 0; i < dev->addr_len; i++)
perm_addr[i] = pf->hw.mac.perm_addr[i];
-
- for (j = 0; j < dev->addr_len; j++, i++)
- perm_addr[i] = pf->hw.mac.san_addr[j];
}
static const struct dcbnl_rtnl_ops dcbnl_ops = {
@@ -891,11 +887,11 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
return;
/* DCB not enabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return;
/* MFP mode but not an iSCSI PF so return */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi))
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(hw->func_caps.iscsi))
return;
dcbxcfg = &hw->local_dcbx_config;
@@ -1002,7 +998,7 @@ void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
int i;
/* MFP mode but not an iSCSI PF so return */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(pf->hw.func_caps.iscsi))
return;
for (i = 0; i < old_cfg->numapps; i++) {
@@ -1025,7 +1021,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
/* Not DCB capable */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return;
dev->dcbnl_ops = &dcbnl_ops;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index cf25bfc5dc3f..2f53f0f53bc3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -81,8 +81,8 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
struct i40e_profile_info *old)
{
- unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
- unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+ unsigned int group_id_old = FIELD_GET(0x00FF0000, old->track_id);
+ unsigned int group_id_new = FIELD_GET(0x00FF0000, new->track_id);
/* 0x00 group must be only the first */
if (group_id_new == 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h
index 27ebc72d8bfe..e9871dfb32bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debug.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h
@@ -37,6 +37,7 @@ struct i40e_hw;
struct device *i40e_hw_to_dev(struct i40e_hw *hw);
#define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A)
+#define hw_warn(hw, S, A...) dev_warn(i40e_hw_to_dev(hw), S, ##A)
#define i40e_debug(h, m, s, ...) \
do { \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 999c9708def5..ef70ddbe9c2f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" state[%d] = %08lx\n",
i, vsi->state[i]);
if (vsi == pf->vsi[pf->lan_vsi])
- dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+ dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
- pf->hw.mac.san_addr,
pf->hw.mac.port_addr);
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
dev_info(&pf->pdev->dev,
@@ -820,8 +819,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
/* By default we are in VEPA mode, if this is the first VF/VMDq
* VSI to be added switch to VEB mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
@@ -1029,9 +1028,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"emp reset count: %d\n", pf->empr_count);
dev_info(&pf->pdev->dev,
"pf reset count: %d\n", pf->pfr_count);
- dev_info(&pf->pdev->dev,
- "pf tx sluggish count: %d\n",
- pf->tx_sluggish_count);
} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
struct i40e_aqc_query_port_ets_config_resp *bw_data;
struct i40e_dcbx_config *cfg =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index ece3a6b9a5c6..ab20202a3da3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -4,6 +4,7 @@
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_
+#include <linux/types.h>
#include "i40e_adminq_cmd.h"
/* forward-declare the HW struct for the compiler */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index fd7163128c4d..c841779713f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -430,35 +430,35 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
struct i40e_priv_flags {
char flag_string[ETH_GSTRING_LEN];
- u64 flag;
+ u8 bitno;
bool read_only;
};
-#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
+#define I40E_PRIV_FLAG(_name, _bitno, _read_only) { \
.flag_string = _name, \
- .flag = _flag, \
+ .bitno = _bitno, \
.read_only = _read_only, \
}
static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
/* NOTE: MFP setting cannot be changed */
- I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENA, 1),
I40E_PRIV_FLAG("total-port-shutdown",
- I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1),
- I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
- I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
- I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
- I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
+ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, 1),
+ I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENA, 0),
+ I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENA, 0),
+ I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENA, 0),
+ I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENA, 0),
I40E_PRIV_FLAG("link-down-on-close",
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0),
- I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, 0),
+ I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX_ENA, 0),
I40E_PRIV_FLAG("disable-source-pruning",
- I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
- I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
+ I40E_FLAG_SOURCE_PRUNING_DIS, 0),
+ I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_FW_LLDP_DIS, 0),
I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
- I40E_FLAG_VF_VLAN_PRUNING, 0),
+ I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -466,7 +466,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
/* Private flags with a global effect, restricted to PF 0 */
static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("vf-true-promisc-support",
- I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
+ I40E_FLAG_TRUE_PROMISC_ENA, 0),
};
#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
@@ -502,7 +502,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
- if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -601,7 +601,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
10000baseKX4_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
- !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseKR_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
@@ -609,7 +609,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
10000baseKR_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
- !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseKX_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
@@ -917,7 +917,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
- if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
if (hw_link_info->requested_speeds &
@@ -1488,12 +1488,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
int status = 0;
- u32 flags = 0;
int err = 0;
- flags = READ_ONCE(pf->flags);
- i40e_set_fec_in_flags(fec_cfg, &flags);
-
/* Get the current phy config */
memset(&abilities, 0, sizeof(abilities));
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
@@ -1525,7 +1521,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
err = -EAGAIN;
goto done;
}
- pf->flags = flags;
+ i40e_set_fec_in_flags(fec_cfg, pf->flags);
status = i40e_update_link_info(hw);
if (status)
/* debug level message only due to relation to the link
@@ -1599,7 +1595,7 @@ static int i40e_set_fec_param(struct net_device *netdev,
return -EPERM;
if (hw->mac.type == I40E_MAC_X722 &&
- !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
+ !test_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps)) {
netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n");
return -EOPNOTSUPP;
}
@@ -1915,7 +1911,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
last = true;
}
- offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+ offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
@@ -1956,9 +1952,8 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
val = X722_EEPROM_SCOPE_LIMIT + 1;
return val;
}
- val = (rd32(hw, I40E_GLPCI_LBARCTRL)
- & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
- >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+ val = FIELD_GET(I40E_GLPCI_LBARCTRL_FL_SIZE_MASK,
+ rd32(hw, I40E_GLPCI_LBARCTRL));
/* register returns value in power of 2, 64Kbyte chunks. */
val = (64 * 1024) * BIT(val);
return val;
@@ -2015,6 +2010,18 @@ static void i40e_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
+static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ return I40E_MAX_NUM_DESCRIPTORS_XL710;
+ default:
+ return I40E_MAX_NUM_DESCRIPTORS;
+ }
+}
+
static void i40e_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2024,8 +2031,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
- ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->rx_max_pending = i40e_get_max_num_descriptors(pf);
+ ring->tx_max_pending = i40e_get_max_num_descriptors(pf);
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = vsi->rx_rings[0]->count;
@@ -2050,12 +2057,12 @@ static int i40e_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
+ u32 new_rx_count, new_tx_count, max_num_descriptors;
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u32 new_rx_count, new_tx_count;
u16 tx_alloc_queue_pairs;
int timeout = 50;
int i, err = 0;
@@ -2063,14 +2070,15 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ max_num_descriptors = i40e_get_max_num_descriptors(pf);
+ if (ring->tx_pending > max_num_descriptors ||
ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
- ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending > max_num_descriptors ||
ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
netdev_info(netdev,
"Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
ring->tx_pending, ring->rx_pending,
- I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ I40E_MIN_NUM_DESCRIPTORS, max_num_descriptors);
return -EINVAL;
}
@@ -2419,7 +2427,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
(pf->lan_veb < I40E_MAX_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+ test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags));
if (veb_stats) {
veb = pf->veb[pf->lan_veb];
@@ -2514,13 +2522,11 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
u8 *p = data;
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&p, "%s",
- i40e_gstrings_priv_flags[i].flag_string);
+ ethtool_puts(&p, i40e_gstrings_priv_flags[i].flag_string);
if (pf->hw.pf_id != 0)
return;
for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&p, "%s",
- i40e_gl_gstrings_priv_flags[i].flag_string);
+ ethtool_puts(&p, i40e_gl_gstrings_priv_flags[i].flag_string);
}
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -2548,7 +2554,7 @@ static int i40e_get_ts_info(struct net_device *dev,
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
/* only report HW timestamping if PTP is enabled */
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -2570,7 +2576,7 @@ static int i40e_get_ts_info(struct net_device *dev,
BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
- if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE)
+ if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
@@ -2819,10 +2825,10 @@ static int i40e_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) {
pf->led_status = i40e_led_get(hw);
} else {
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps))
i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
NULL);
ret = i40e_led_get_phy(hw, &temp_status,
@@ -2831,25 +2837,25 @@ static int i40e_set_phys_id(struct net_device *netdev,
}
return blink_freq;
case ETHTOOL_ID_ON:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps))
i40e_led_set(hw, 0xf, false);
else
ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
break;
case ETHTOOL_ID_OFF:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps))
i40e_led_set(hw, 0x0, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
break;
case ETHTOOL_ID_INACTIVE:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) {
i40e_led_set(hw, pf->led_status, false);
} else {
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
I40E_PHY_LED_MODE_ORIG));
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps))
i40e_aq_set_phy_debug(hw, 0, NULL);
}
break;
@@ -2886,7 +2892,6 @@ static int __i40e_get_coalesce(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
- ec->rx_max_coalesced_frames_irq = vsi->work_limit;
/* rx and tx usecs has per queue value. If user doesn't specify the
* queue, return queue 0's value to represent.
@@ -3020,7 +3025,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ if (ec->tx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
if (queue < 0) {
@@ -3278,7 +3283,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
} else if (valid) {
data->flex_word = value & I40E_USERDEF_FLEX_WORD;
data->flex_offset =
- (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
+ FIELD_GET(I40E_USERDEF_FLEX_OFFSET, value);
data->flex_filter = true;
}
@@ -3628,7 +3633,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
return -EOPNOTSUPP;
@@ -3644,19 +3649,22 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps))
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps))
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps)) {
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
@@ -3666,7 +3674,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
break;
case UDP_V6_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps)) {
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
@@ -4644,7 +4653,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
* main port cannot change them when in MFP mode as this would impact
* any filters on the other ports.
*/
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n");
return -EOPNOTSUPP;
}
@@ -4804,7 +4813,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
return -EINVAL;
pf = vsi->back;
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return -EOPNOTSUPP;
if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
@@ -5001,7 +5010,7 @@ static void i40e_get_channels(struct net_device *dev,
ch->max_combined = i40e_max_channels(vsi);
/* report info for other vector */
- ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+ ch->other_count = test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0;
ch->max_other = ch->other_count;
/* Note: This code assumes DCB is disabled for now. */
@@ -5044,7 +5053,7 @@ static int i40e_set_channels(struct net_device *dev,
return -EINVAL;
/* verify other_count has not changed */
- if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+ if (ch->other_count != (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0))
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
@@ -5109,15 +5118,13 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
/**
* i40e_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Returns 0 on
* success.
**/
-static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int i40e_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5125,13 +5132,12 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
int ret;
u16 i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- seed = key;
+ seed = rxfh->key;
lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -5139,7 +5145,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (ret)
goto out;
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- indir[i] = (u32)(lut[i]);
+ rxfh->indir[i] = (u32)(lut[i]);
out:
kfree(lut);
@@ -5150,15 +5156,15 @@ out:
/**
* i40e_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
-static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int i40e_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5166,17 +5172,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
u8 *seed = NULL;
u16 i;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (key) {
+ if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
GFP_KERNEL);
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
- memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
+ memcpy(vsi->rss_hkey_user, rxfh->key, I40E_HKEY_ARRAY_SIZE);
seed = vsi->rss_hkey_user;
}
if (!vsi->rss_lut_user) {
@@ -5186,9 +5193,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- if (indir)
+ if (rxfh->indir)
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
else
i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
vsi->rss_size);
@@ -5215,11 +5222,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
u32 i, j, ret_flags = 0;
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
- priv_flags = &i40e_gstrings_priv_flags[i];
+ priv_flag = &i40e_gstrings_priv_flags[i];
- if (priv_flags->flag & pf->flags)
+ if (test_bit(priv_flag->bitno, pf->flags))
ret_flags |= BIT(i);
}
@@ -5227,11 +5234,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
return ret_flags;
for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
- priv_flags = &i40e_gl_gstrings_priv_flags[j];
+ priv_flag = &i40e_gl_gstrings_priv_flags[j];
- if (priv_flags->flag & pf->flags)
+ if (test_bit(priv_flag->bitno, pf->flags))
ret_flags |= BIT(i + j);
}
@@ -5245,8 +5252,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
**/
static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
{
+ DECLARE_BITMAP(changed_flags, I40E_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS);
struct i40e_netdev_priv *np = netdev_priv(dev);
- u64 orig_flags, new_flags, changed_flags;
enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -5254,51 +5263,57 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
int status;
u32 i, j;
- orig_flags = READ_ONCE(pf->flags);
- new_flags = orig_flags;
+ bitmap_copy(orig_flags, pf->flags, I40E_PF_FLAGS_NBITS);
+ bitmap_copy(new_flags, pf->flags, I40E_PF_FLAGS_NBITS);
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- const struct i40e_priv_flags *priv_flags;
-
- priv_flags = &i40e_gstrings_priv_flags[i];
+ const struct i40e_priv_flags *priv_flag;
+ bool new_val;
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
+ priv_flag = &i40e_gstrings_priv_flags[i];
+ new_val = (flags & BIT(i)) ? true : false;
/* If this is a read-only flag, it can't be changed */
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
+ if (priv_flag->read_only &&
+ test_bit(priv_flag->bitno, orig_flags) != new_val)
return -EOPNOTSUPP;
+
+ if (new_val)
+ set_bit(priv_flag->bitno, new_flags);
+ else
+ clear_bit(priv_flag->bitno, new_flags);
}
if (pf->hw.pf_id != 0)
goto flags_complete;
for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
+ bool new_val;
- priv_flags = &i40e_gl_gstrings_priv_flags[j];
-
- if (flags & BIT(i + j))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
+ priv_flag = &i40e_gl_gstrings_priv_flags[j];
+ new_val = (flags & BIT(i + j)) ? true : false;
/* If this is a read-only flag, it can't be changed */
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
+ if (priv_flag->read_only &&
+ test_bit(priv_flag->bitno, orig_flags) != new_val)
return -EOPNOTSUPP;
+
+ if (new_val)
+ set_bit(priv_flag->bitno, new_flags);
+ else
+ clear_bit(priv_flag->bitno, new_flags);
}
flags_complete:
- changed_flags = orig_flags ^ new_flags;
+	bitmap_xor(changed_flags, orig_flags, new_flags, I40E_PF_FLAGS_NBITS);
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags))
reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
- if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
- I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
+
+ if (test_bit(I40E_FLAG_VEB_STATS_ENA, changed_flags) ||
+ test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) ||
+ test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, changed_flags))
reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
/* Before we finalize any flag changes, we need to perform some
@@ -5306,8 +5321,8 @@ flags_complete:
*/
/* ATR eviction is not supported on all devices */
- if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) &&
- !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, new_flags) &&
+ !test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
return -EOPNOTSUPP;
/* If the driver detected FW LLDP was disabled on init, this flag could
@@ -5318,15 +5333,14 @@ flags_complete:
* disable LLDP, however we _must_ not allow the user to enable/disable
* LLDP with this flag on unsupported FW versions.
*/
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
- dev_warn(&pf->pdev->dev,
- "Device does not support changing FW LLDP\n");
- return -EOPNOTSUPP;
- }
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags) &&
+ !test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps)) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FW LLDP\n");
+ return -EOPNOTSUPP;
}
- if (changed_flags & I40E_FLAG_RS_FEC &&
+ if (test_bit(I40E_FLAG_RS_FEC, changed_flags) &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B) {
dev_warn(&pf->pdev->dev,
@@ -5334,7 +5348,7 @@ flags_complete:
return -EOPNOTSUPP;
}
- if (changed_flags & I40E_FLAG_BASE_R_FEC &&
+ if (test_bit(I40E_FLAG_BASE_R_FEC, changed_flags) &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B &&
pf->hw.device_id != I40E_DEV_ID_KX_X722) {
@@ -5349,17 +5363,17 @@ flags_complete:
*/
/* Flush current ATR settings if ATR was disabled */
- if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
- !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, changed_flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA, new_flags)) {
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
- if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
+ if (test_bit(I40E_FLAG_TRUE_PROMISC_ENA, changed_flags)) {
u16 sw_flags = 0, valid_flags = 0;
int ret;
- if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ if (!test_bit(I40E_FLAG_TRUE_PROMISC_ENA, new_flags))
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
@@ -5374,17 +5388,17 @@ flags_complete:
}
}
- if ((changed_flags & I40E_FLAG_RS_FEC) ||
- (changed_flags & I40E_FLAG_BASE_R_FEC)) {
+ if (test_bit(I40E_FLAG_RS_FEC, changed_flags) ||
+ test_bit(I40E_FLAG_BASE_R_FEC, changed_flags)) {
u8 fec_cfg = 0;
- if (new_flags & I40E_FLAG_RS_FEC &&
- new_flags & I40E_FLAG_BASE_R_FEC) {
+ if (test_bit(I40E_FLAG_RS_FEC, new_flags) &&
+ test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) {
fec_cfg = I40E_AQ_SET_FEC_AUTO;
- } else if (new_flags & I40E_FLAG_RS_FEC) {
+ } else if (test_bit(I40E_FLAG_RS_FEC, new_flags)) {
fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
I40E_AQ_SET_FEC_ABILITY_RS);
- } else if (new_flags & I40E_FLAG_BASE_R_FEC) {
+ } else if (test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) {
fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
I40E_AQ_SET_FEC_ABILITY_KR);
}
@@ -5392,35 +5406,35 @@ flags_complete:
dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
}
- if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
- (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) {
+ if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) &&
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, orig_flags)) {
dev_err(&pf->pdev->dev,
"Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) &&
+ if (test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, changed_flags) &&
pf->num_alloc_vfs) {
dev_warn(&pf->pdev->dev,
"Changing vf-vlan-pruning flag while VF(s) are active is not supported\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & I40E_FLAG_LEGACY_RX) &&
+ if (test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) &&
I40E_2K_TOO_SMALL_WITH_PADDING) {
dev_warn(&pf->pdev->dev,
"2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & new_flags &
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
- (new_flags & I40E_FLAG_MFP_ENABLED))
+ if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) &&
+ test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, new_flags) &&
+ test_bit(I40E_FLAG_MFP_ENA, new_flags))
dev_warn(&pf->pdev->dev,
"Turning on link-down-on-close flag may affect other partitions\n");
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags)) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, new_flags)) {
#ifdef CONFIG_I40E_DCB
i40e_dcb_sw_default_config(pf);
#endif /* CONFIG_I40E_DCB */
@@ -5461,7 +5475,7 @@ flags_complete:
* initialization or (b) while holding the RTNL lock, we don't need
* anything fancy here.
*/
- pf->flags = new_flags;
+ bitmap_copy(pf->flags, new_flags, I40E_PF_FLAGS_NBITS);
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
@@ -5491,7 +5505,7 @@ static int i40e_get_module_info(struct net_device *netdev,
int status;
/* Check if firmware supports reading module EEPROM. */
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n");
return -EINVAL;
}
@@ -5571,8 +5585,8 @@ static int i40e_get_module_info(struct net_device *netdev,
modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
break;
default:
- netdev_err(vsi->netdev, "Module type unrecognized\n");
- return -EINVAL;
+ netdev_dbg(vsi->netdev, "SFP module type unrecognized or no SFP connector used.\n");
+ return -EOPNOTSUPP;
}
return 0;
}
@@ -5768,7 +5782,7 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
static const struct ethtool_ops i40e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH |
ETHTOOL_COALESCE_TX_USECS_HIGH,
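
The ethtool changes above follow the driver-wide conversion of pf->flags from a u64 of I40E_FLAG_* bits to a fixed-size bitmap indexed by bit number and queried with test_bit() and friends. A minimal sketch of the pattern, assuming kernel context (the structure, flag names and NBITS value below are illustrative, not the real i40e definitions):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_FLAGS_NBITS	64

enum example_flag {
	EXAMPLE_FLAG_DCB_ENA,
	EXAMPLE_FLAG_MSIX_ENA,
};

struct example_pf {
	DECLARE_BITMAP(flags, EXAMPLE_FLAGS_NBITS);
};

static void example_set_dcb(struct example_pf *pf, bool enable)
{
	/* Old style: pf->flags |= FLAG; or pf->flags &= ~FLAG; on a u64. */
	if (enable)
		set_bit(EXAMPLE_FLAG_DCB_ENA, pf->flags);
	else
		clear_bit(EXAMPLE_FLAG_DCB_ENA, pf->flags);
}

static bool example_msix_enabled(const struct example_pf *pf)
{
	/* Old style: return pf->flags & FLAG; */
	return test_bit(EXAMPLE_FLAG_MSIX_ENA, pf->flags);
}
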
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d5519af34657..89a3401d20ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -377,7 +377,7 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
head = i40e_get_head(tx_ring);
/* Read interrupt register */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
val = rd32(&pf->hw,
I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
tx_ring->vsi->base_vector - 1));
@@ -1203,11 +1203,9 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
val = rd32(hw, I40E_PRTPM_EEE_STAT);
nsd->tx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ FIELD_GET(I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK, val);
nsd->rx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ FIELD_GET(I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK, val);
i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
pf->stat_offsets_loaded,
&osd->tx_lpi_count, &nsd->tx_lpi_count);
@@ -1215,13 +1213,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->rx_lpi_count, &nsd->rx_lpi_count);
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
!test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
!test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
nsd->fd_atr_status = true;
else
@@ -1491,7 +1489,7 @@ static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
return pvid;
is_any = (trusted ||
- !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
+ !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags));
if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
(!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
@@ -1896,7 +1894,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
u8 *lut;
int ret;
- if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return 0;
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
@@ -2051,7 +2049,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
- else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
vsi->num_queue_pairs = pf->num_lan_msix;
else
vsi->num_queue_pairs = 1;
@@ -2064,7 +2062,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
else
num_tc_qps = vsi->alloc_queue_pairs;
- if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
/* Find numtc from enabled TC bitmap */
for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (enabled_tc & BIT(i)) /* TC is enabled */
@@ -2083,7 +2081,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Do not allow use more TC queue pairs than MSI-X vectors exist */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
/* Setup queue offset/count for all TCs for given VSI */
@@ -2095,8 +2093,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
switch (vsi->type) {
case I40E_VSI_MAIN:
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED)) ||
+ if ((!test_bit(I40E_FLAG_FD_SB_ENA,
+ pf->flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA,
+ pf->flags)) ||
vsi->tc_config.enabled_tc != 1) {
qcount = min_t(int, pf->alloc_rss_size,
num_tc_qps);
@@ -2482,7 +2482,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
if (vsi->type == I40E_VSI_MAIN &&
pf->lan_veb != I40E_NO_VEB &&
- !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
* promisc behavior but will not get VF or VMDq traffic
@@ -2913,7 +2913,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
*/
static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
{
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags))
return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
@@ -3468,8 +3468,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
ring->xsk_pool = i40e_xsk_pool(ring);
/* some ATR related tx ring init */
- if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
- ring->atr_sample_rate = vsi->back->atr_sample_rate;
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) {
+ ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
ring->atr_count = 0;
} else {
ring->atr_sample_rate = 0;
@@ -3484,9 +3484,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.new_context = 1;
tx_ctx.base = (ring->dma / 128);
tx_ctx.qlen = ring->count;
- tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED));
- tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+ if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags))
+ tx_ctx.fd_ena = 1;
+ if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags))
+ tx_ctx.timesync_ena = 1;
/* FDIR VSI tx ring can still use RS bit and writebacks */
if (vsi->type != I40E_VSI_FDIR)
tx_ctx.head_wb_ena = 1;
@@ -3538,21 +3540,19 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
else
return -EINVAL;
- qtx_ctl |= (ring->ch->vsi_number <<
- I40E_QTX_CTL_VFVM_INDX_SHIFT) &
- I40E_QTX_CTL_VFVM_INDX_MASK;
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ ring->ch->vsi_number);
} else {
if (vsi->type == I40E_VSI_VMDQ2) {
qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
- qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
- I40E_QTX_CTL_VFVM_INDX_MASK;
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ vsi->id);
} else {
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
}
}
- qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
- I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
@@ -3588,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hmc_obj_rxq rx_ctx;
int err = 0;
bool ok;
- int ret;
bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
- if (ring->vsi->type == I40E_VSI_MAIN)
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->rx_buf_len = vsi->rx_buf_len;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (ring->vsi->type != I40E_VSI_MAIN)
+ goto skip;
+
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ring->rx_buf_len =
- xsk_pool_get_rx_frame_size(ring->xsk_pool);
- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
- if (ret)
- return ret;
+ if (err)
+ return err;
dev_info(&vsi->back->pdev->dev,
"Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->queue_index);
} else {
- ring->rx_buf_len = vsi->rx_buf_len;
- if (ring->vsi->type == I40E_VSI_MAIN) {
- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (ret)
- return ret;
- }
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
}
+skip:
xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
@@ -3669,7 +3684,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
}
/* configure Rx buffer alignment */
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+ if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) {
if (I40E_2K_TOO_SMALL_WITH_PADDING) {
dev_info(&vsi->back->pdev->dev,
"2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
@@ -3767,7 +3782,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
/* Reset the TC information */
for (i = 0; i < vsi->num_queue_pairs; i++) {
rx_ring = vsi->rx_rings[i];
@@ -3834,7 +3849,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return;
/* Reset FDir counters as we're replaying all existing filters */
@@ -3972,10 +3987,10 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
- if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, val);
@@ -4211,7 +4226,7 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
}
/* disable each interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = vsi->base_vector;
i < (vsi->num_q_vectors + vsi->base_vector); i++)
wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
@@ -4237,7 +4252,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
int i;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
@@ -4258,7 +4273,7 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
i40e_flush(&pf->hw);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
free_irq(pf->msix_entries[0].vector, pf);
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
}
@@ -4293,7 +4308,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0 & I40E_PFINT_ICR0_SWINT_MASK))
pf->sw_int_count++;
- if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
(icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
@@ -4344,8 +4359,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
val = rd32(hw, I40E_GLGEN_RSTAT);
- val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
- >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ val = FIELD_GET(I40E_GLGEN_RSTAT_RESET_TYPE_MASK, val);
if (val == I40E_RESET_CORER) {
pf->corer_count++;
} else if (val == I40E_RESET_GLOBR) {
@@ -4486,7 +4500,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags))
i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
return budget > 0;
@@ -4599,9 +4613,9 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
struct i40e_pf *pf = vsi->back;
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
err = i40e_vsi_request_irq_msix(vsi, basename);
- else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+ else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags))
err = request_irq(pf->pdev->irq, i40e_intr, 0,
pf->int_name, pf);
else
@@ -4633,7 +4647,7 @@ static void i40e_netpoll(struct net_device *netdev)
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
@@ -4912,27 +4926,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int pf_q, err, q_end;
+ u32 pf_q, tx_q_end, rx_q_end;
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- q_end = vsi->base_queue + vsi->num_queue_pairs;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
- i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+ tx_q_end = vsi->base_queue +
+ vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false);
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
- err = i40e_control_wait_rx_q(pf, pf_q, false);
- if (err)
- dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d disable timeout\n",
- vsi->seid, pf_q);
- }
+ rx_q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++)
+ i40e_control_rx_q(pf, pf_q, false);
msleep(I40E_DISABLE_TX_GAP_MSEC);
- pf_q = vsi->base_queue;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
i40e_vsi_wait_queues_disabled(vsi);
@@ -4973,7 +4983,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u32 val, qp;
int i;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
if (!vsi->q_vectors)
return;
@@ -5007,8 +5017,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
* next_q field of the registers.
*/
val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
- qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
- >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK,
+ val);
val |= I40E_QUEUE_END_OF_LIST
<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
@@ -5030,8 +5040,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
val = rd32(hw, I40E_QINT_TQCTL(qp));
- next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
- >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+ next = FIELD_GET(I40E_QINT_TQCTL_NEXTQ_INDX_MASK,
+ val);
val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
I40E_QINT_TQCTL_MSIX0_INDX_MASK |
@@ -5049,8 +5059,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
free_irq(pf->pdev->irq, pf);
val = rd32(hw, I40E_PFINT_LNKLST0);
- qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
- >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK, val);
val |= I40E_QUEUE_END_OF_LIST
<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
wr32(hw, I40E_PFINT_LNKLST0, val);
@@ -5135,16 +5144,17 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
pci_disable_msix(pf->pdev);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
kfree(pf->irq_pile);
pf->irq_pile = NULL;
- } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+ } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
pci_disable_msi(pf->pdev);
}
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+ clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
}
/**
@@ -5346,7 +5356,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
int v, ret = 0;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v]) {
ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret)
@@ -5484,11 +5494,11 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return 1;
/* SFP mode will be enabled for all TCs on port */
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
return i40e_dcb_get_num_tc(dcbcfg);
/* MFP mode return count of enabled TCs for this PF */
@@ -5518,11 +5528,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
/* If neither MQPRIO nor DCB is enabled for this PF then just return
* default TC
*/
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
/* MFP enabled and iSCSI PF type */
@@ -5611,7 +5621,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
/* There is no need to reset BW when mqprio mode is on. */
if (i40e_is_tc_mqprio_enabled(pf))
return 0;
- if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
dev_info(&pf->pdev->dev,
@@ -5864,7 +5874,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
}
vsi->reconfig_rss = false;
}
- if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
@@ -6277,7 +6287,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
if (ch->type == I40E_VSI_VMDQ2)
ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id =
@@ -6582,8 +6592,8 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
* VSI to be added switch to VEB mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
if (vsi->type == I40E_VSI_MAIN) {
if (i40e_is_tc_mqprio_enabled(pf))
@@ -6994,9 +7004,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (need_reconfig) {
/* Enable DCB tagging only when more than one TC */
if (new_numtc > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed quiesce all VSIs */
@@ -7086,7 +7096,7 @@ out:
set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
/* registers are set, lets apply */
- if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+ if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps))
ret = i40e_hw_set_dcb_config(pf, new_cfg);
}
@@ -7107,7 +7117,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err;
- if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
+ if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) {
/* Update the local cached instance with TC0 ETS */
memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
@@ -7168,12 +7178,12 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
* Also do not enable DCBx if FW LLDP agent is disabled
*/
- if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
+ if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) {
dev_info(&pf->pdev->dev, "DCB is not supported.\n");
err = -EOPNOTSUPP;
goto out;
}
- if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) {
dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
err = i40e_dcb_sw_default_config(pf);
if (err) {
@@ -7184,8 +7194,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->dcbx_cap = DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
/* at init capable but disabled */
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
goto out;
}
err = i40e_init_dcb(hw, true);
@@ -7200,20 +7210,20 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Enable DCB tagging only when more than one TC
* or explicitly disable if only one TC
*/
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
- pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %pe aq_err %s\n",
@@ -7373,7 +7383,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
i40e_vsi_configure_msix(vsi);
else
i40e_configure_msi_and_legacy(vsi);
@@ -7477,7 +7487,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
* and its speed values are OK, no need for a flap
* if non_zero_phy_type was set, still need to force up
*/
- if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+ if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags))
non_zero_phy_type = true;
else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
return 0;
@@ -7493,7 +7503,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
/* Copy the old settings, except of phy_type */
config.abilities = abilities.abilities;
- if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
+ if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
if (is_up)
config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
else
@@ -7543,8 +7553,8 @@ int i40e_up(struct i40e_vsi *vsi)
int err;
if (vsi->type == I40E_VSI_MAIN &&
- (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
- vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
i40e_force_link_state(vsi->back, true);
err = i40e_vsi_configure(vsi);
@@ -7572,8 +7582,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
i40e_vsi_stop_rings(vsi);
if (vsi->type == I40E_VSI_MAIN &&
- (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
- vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
i40e_force_link_state(vsi->back, false);
i40e_napi_disable_all(vsi);
@@ -7979,7 +7989,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
struct i40e_fwd_adapter *fwd;
int avail_macvlan, ret;
- if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
return ERR_PTR(-EINVAL);
}
@@ -8174,23 +8184,23 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
hw = mqprio_qopt->qopt.hw;
mode = mqprio_qopt->mode;
if (!hw) {
- pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+ clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
goto config_tc;
}
/* Check if MFP enabled */
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
netdev_info(netdev,
"Configuring TC not supported in MFP mode\n");
return ret;
}
switch (mode) {
case TC_MQPRIO_MODE_DCB:
- pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+ clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
/* Check if DCB enabled to continue */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev,
"DCB is not enabled for adapter\n");
return ret;
@@ -8204,20 +8214,20 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
}
break;
case TC_MQPRIO_MODE_CHANNEL:
- if (pf->flags & I40E_FLAG_DCB_ENABLED) {
+ if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev,
"Full offload of TC Mqprio options is not supported when DCB is enabled\n");
return ret;
}
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return ret;
ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
if (ret)
return ret;
memcpy(&vsi->mqprio_qopt, mqprio_qopt,
sizeof(*mqprio_qopt));
- pf->flags |= I40E_FLAG_TC_MQPRIO;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
break;
default:
return -EINVAL;
@@ -8801,11 +8811,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EINVAL;
}
- if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) {
dev_err(&vsi->back->pdev->dev,
"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
- vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags);
+ set_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags);
}
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
@@ -8901,11 +8911,11 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
pf->num_cloud_filters--;
if (!pf->num_cloud_filters)
- if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
- !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
return 0;
}
@@ -9206,11 +9216,11 @@ static void i40e_cloud_filter_exit(struct i40e_pf *pf)
}
pf->num_cloud_filters = 0;
- if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
- !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
}
@@ -9298,7 +9308,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
i40e_prep_for_reset(pf);
i40e_reset_and_rebuild(pf, true, lock_acquired);
dev_info(&pf->pdev->dev,
- pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
@@ -9413,12 +9423,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
(hw->phy.link_info.link_speed &
~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
- !(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
/* let firmware decide if the DCB should be disabled */
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -9454,7 +9464,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
@@ -9482,9 +9492,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Enable DCB tagging only when more than one TC */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed quiesce all VSIs */
@@ -9552,18 +9562,18 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
queue, qtx_ctl);
+ if (FIELD_GET(I40E_QTX_CTL_PFVF_Q_MASK, qtx_ctl) !=
+ I40E_QTX_CTL_VF_QUEUE)
+ return;
+
/* Queue belongs to VF, find the VF and issue VF reset */
- if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
- >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
- vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
- >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
- vf_id -= hw->func_caps.vf_base_id;
- vf = &pf->vf[vf_id];
- i40e_vc_notify_vf_reset(vf);
- /* Allow VF to process pending reset notification */
- msleep(20);
- i40e_reset_vf(vf, false);
- }
+ vf_id = FIELD_GET(I40E_QTX_CTL_VFVM_INDX_MASK, qtx_ctl);
+ vf_id -= hw->func_caps.vf_base_id;
+ vf = &pf->vf[vf_id];
+ i40e_vc_notify_vf_reset(vf);
+ /* Allow VF to process pending reset notification */
+ msleep(20);
+ i40e_reset_vf(vf, false);
}
/**
@@ -9589,8 +9599,7 @@ u32 i40e_get_current_fd_count(struct i40e_pf *pf)
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
- ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
- I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ FIELD_GET(I40E_PFQF_FDSTAT_BEST_CNT_MASK, val);
return fcnt_prog;
}
@@ -9604,8 +9613,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
- ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
- I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+ FIELD_GET(I40E_GLQF_FDCNT_0_BESTCNT_MASK, val);
return fcnt_prog;
}
@@ -9616,7 +9624,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
{
if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
@@ -9637,7 +9645,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
@@ -9952,7 +9960,7 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->vf)
i40e_vc_notify_link_state(pf);
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
i40e_ptp_set_increment(pf);
#ifdef CONFIG_I40E_DCB
if (new_link == old_link)
@@ -9969,13 +9977,13 @@ static void i40e_link_event(struct i40e_pf *pf)
memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
err = i40e_dcb_sw_default_config(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
} else {
pf->dcbx_cap = DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
}
}
#endif /* CONFIG_I40E_DCB */
@@ -10000,7 +10008,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
- if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+ if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) ||
test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
i40e_link_event(pf);
@@ -10011,7 +10019,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
- if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+ if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
/* Update the stats for the active switching components */
for (i = 0; i < I40E_MAX_VEB; i++)
if (pf->veb[i])
@@ -10100,7 +10108,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)) &&
- (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+ (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) {
dev_err(&pf->pdev->dev,
"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
dev_err(&pf->pdev->dev,
@@ -10128,7 +10136,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
return;
/* check for error indications */
- val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ val = rd32(&pf->hw, I40E_PF_ARQLEN);
oldval = val;
if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
if (hw->debug_mask & I40E_DEBUG_AQ)
@@ -10147,9 +10155,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
}
if (oldval != val)
- wr32(&pf->hw, pf->hw.aq.arq.len, val);
+ wr32(&pf->hw, I40E_PF_ARQLEN, val);
- val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ val = rd32(&pf->hw, I40E_PF_ATQLEN);
oldval = val;
if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
if (pf->hw.debug_mask & I40E_DEBUG_AQ)
@@ -10167,7 +10175,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
}
if (oldval != val)
- wr32(&pf->hw, pf->hw.aq.asq.len, val);
+ wr32(&pf->hw, I40E_PF_ATQLEN, val);
event.buf_len = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
@@ -10227,9 +10235,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
opcode);
break;
}
- } while (i++ < pf->adminq_work_limit);
+ } while (i++ < I40E_AQ_WORK_LIMIT);
- if (i < pf->adminq_work_limit)
+ if (i < I40E_AQ_WORK_LIMIT)
clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
/* re-enable Admin queue interrupt cause */
@@ -10406,7 +10414,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
veb->bridge_mode = BRIDGE_MODE_VEB;
else
veb->bridge_mode = BRIDGE_MODE_VEPA;
@@ -10551,7 +10559,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
}
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return;
/* find existing VSI and see if it needs configuring */
@@ -10563,8 +10571,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
return;
}
}
@@ -10942,14 +10950,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_aq_set_dcb_parameters(hw, false, NULL);
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
} else {
i40e_aq_set_dcb_parameters(hw, true, NULL);
ret = i40e_init_pf_dcb(pf);
if (ret) {
dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
ret);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Continue without DCB enabled */
}
}
@@ -11070,7 +11078,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
wr32(hw, I40E_REG_MSS, val);
}
- if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+ if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
@@ -11080,7 +11088,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
ret = i40e_setup_misc_vector(pf);
if (ret)
goto end_unlock;
@@ -11187,14 +11195,10 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
- u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
- I40E_GL_MDET_TX_PF_NUM_SHIFT;
- u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
- I40E_GL_MDET_TX_VF_NUM_SHIFT;
- u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
- I40E_GL_MDET_TX_EVENT_SHIFT;
- u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
- I40E_GL_MDET_TX_QUEUE_SHIFT) -
+ u8 pf_num = FIELD_GET(I40E_GL_MDET_TX_PF_NUM_MASK, reg);
+ u16 vf_num = FIELD_GET(I40E_GL_MDET_TX_VF_NUM_MASK, reg);
+ u8 event = FIELD_GET(I40E_GL_MDET_TX_EVENT_MASK, reg);
+ u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) -
pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
@@ -11204,12 +11208,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
reg = rd32(hw, I40E_GL_MDET_RX);
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
- u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
- I40E_GL_MDET_RX_FUNCTION_SHIFT;
- u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
- I40E_GL_MDET_RX_EVENT_SHIFT;
- u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
- I40E_GL_MDET_RX_QUEUE_SHIFT) -
+ u8 func = FIELD_GET(I40E_GL_MDET_RX_FUNCTION_MASK, reg);
+ u8 event = FIELD_GET(I40E_GL_MDET_RX_EVENT_MASK, reg);
+ u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) -
pf->hw.func_caps.base_queue;
if (netif_msg_rx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
@@ -11355,7 +11356,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
if (!vsi->num_rx_desc)
vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
I40E_REQ_DESCRIPTOR_MULTIPLE);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
vsi->num_q_vectors = pf->num_lan_msix;
else
vsi->num_q_vectors = 1;
@@ -11673,7 +11674,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->count = vsi->num_tx_desc;
ring->size = 0;
ring->dcb_tc = 0;
- if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
WRITE_ONCE(vsi->tx_rings[i], ring++);
@@ -11690,7 +11691,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->count = vsi->num_tx_desc;
ring->size = 0;
ring->dcb_tc = 0;
- if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
@@ -11754,7 +11755,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
int v_actual;
int iwarp_requested = 0;
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return -ENODEV;
/* The number of vectors we'll request will be comprised of:
@@ -11793,7 +11794,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left -= pf->num_lan_msix;
/* reserve one vector for sideband flow director */
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
if (vectors_left) {
pf->num_fdsb_msix = 1;
v_budget++;
@@ -11804,7 +11805,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/* can we reserve enough for iWARP? */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
iwarp_requested = pf->num_iwarp_msix;
if (!vectors_left)
@@ -11816,7 +11817,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/* any vectors left over go for VMDq support */
- if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) {
if (!vectors_left) {
pf->num_vmdq_msix = 0;
pf->num_vmdq_qps = 0;
@@ -11873,7 +11874,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
v_actual = i40e_reserve_msix_vectors(pf, v_budget);
if (v_actual < I40E_MIN_MSIX) {
- pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
pci_disable_msix(pf->pdev);
@@ -11911,7 +11912,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_msix = 1;
break;
case 3:
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->num_lan_msix = 1;
pf->num_iwarp_msix = 1;
} else {
@@ -11919,7 +11920,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
break;
default:
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->num_iwarp_msix = min_t(int, (vec / 3),
iwarp_requested);
pf->num_vmdq_vsis = min_t(int, (vec / 3),
@@ -11928,7 +11929,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_vsis = min_t(int, (vec / 2),
I40E_DEFAULT_NUM_VMDQ_VSI);
}
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
pf->num_fdsb_msix = 1;
vec--;
}
@@ -11940,22 +11941,20 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- (pf->num_fdsb_msix == 0)) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) {
dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
- if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
- (pf->num_vmdq_msix == 0)) {
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) {
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
}
- if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
- (pf->num_iwarp_msix == 0)) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
+ pf->num_iwarp_msix == 0) {
dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
}
i40e_debug(&pf->hw, I40E_DEBUG_INIT,
"MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
@@ -12009,7 +12008,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
int err, v_idx, num_q_vectors;
/* if not MSIX, give the one vector only to the LAN VSI */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_q_vectors = vsi->num_q_vectors;
else if (vsi == pf->vsi[pf->lan_vsi])
num_q_vectors = 1;
@@ -12040,38 +12039,39 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
int vectors = 0;
ssize_t size;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
vectors = i40e_init_msix(pf);
if (vectors < 0) {
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
}
}
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
vectors = pci_enable_msi(pf->pdev);
if (vectors < 0) {
dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
vectors);
- pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+ clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
}
vectors = 1; /* one MSI or Legacy vector */
}
- if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+ if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
/* set up vector assignment tracking */
@@ -12104,7 +12104,8 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
* scheme. We need to re-enabled them here in order to attempt to
* re-acquire the MSI or MSI-X vectors
*/
- pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+ set_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
err = i40e_init_interrupt_scheme(pf);
if (err)
@@ -12126,7 +12127,7 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
if (err)
goto err_unwind;
- if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
i40e_client_update_msix_info(pf);
return 0;
@@ -12154,7 +12155,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
{
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
err = i40e_setup_misc_vector(pf);
if (err) {
@@ -12164,7 +12165,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
return err;
}
} else {
- u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
+ u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED;
err = request_irq(pf->pdev->irq, i40e_intr, flags,
pf->int_name, pf);
@@ -12368,7 +12369,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
struct i40e_pf *pf = vsi->back;
- if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return i40e_config_rss_aq(vsi, seed, lut, lut_size);
else
return i40e_config_rss_reg(vsi, seed, lut, lut_size);
@@ -12387,7 +12388,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
struct i40e_pf *pf = vsi->back;
- if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return i40e_get_rss_aq(vsi, seed, lut, lut_size);
else
return i40e_get_rss_reg(vsi, seed, lut, lut_size);
@@ -12490,7 +12491,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int new_rss_size;
- if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags))
return 0;
queue_count = min_t(int, queue_count, num_online_cpus());
@@ -12723,9 +12724,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
u16 pow;
/* Set default capability flags */
- pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
- I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_MSIX_ENABLED;
+ bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS);
+ set_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_RX_DEF;
@@ -12745,14 +12746,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
if (pf->hw.func_caps.rss) {
- pf->flags |= I40E_FLAG_RSS_ENABLED;
+ set_bit(I40E_FLAG_RSS_ENA, pf->flags);
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
num_online_cpus());
}
/* MFP mode enabled */
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
+ set_bit(I40E_FLAG_MFP_ENA, pf->flags);
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
if (i40e_get_partition_bw_setting(pf)) {
dev_warn(&pf->pdev->dev,
@@ -12769,84 +12770,31 @@ static int i40e_sw_init(struct i40e_pf *pf)
if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+ set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
pf->hw.num_partitions > 1)
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
else
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
pf->fdir_pf_filter_count =
pf->hw.func_caps.fd_filters_guaranteed;
pf->hw.fdir_shared_filter_count =
pf->hw.func_caps.fd_filters_best_effort;
}
- if (pf->hw.mac.type == I40E_MAC_X722) {
- pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
- I40E_HW_128_QP_RSS_CAPABLE |
- I40E_HW_ATR_EVICT_CAPABLE |
- I40E_HW_WB_ON_ITR_CAPABLE |
- I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
- I40E_HW_NO_PCI_LINK_CHECK |
- I40E_HW_USE_SET_LLDP_MIB |
- I40E_HW_GENEVE_OFFLOAD_CAPABLE |
- I40E_HW_PTP_L4_CAPABLE |
- I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
- I40E_HW_OUTER_UDP_CSUM_CAPABLE);
-
-#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
- if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
- I40E_FDEVICT_PCTYPE_DEFAULT) {
- dev_warn(&pf->pdev->dev,
- "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
- pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
- }
- } else if ((pf->hw.aq.api_maj_ver > 1) ||
- ((pf->hw.aq.api_maj_ver == 1) &&
- (pf->hw.aq.api_min_ver > 4))) {
- /* Supported in FW API version higher than 1.4 */
- pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
- }
-
/* Enable HW ATR eviction if possible */
- if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
- pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
-
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
- (pf->hw.aq.fw_maj_ver < 4))) {
- pf->hw_features |= I40E_HW_RESTART_AUTONEG;
- /* No DCB support for FW < v4.33 */
- pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
- }
-
- /* Disable FW LLDP if FW < v4.3 */
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
- (pf->hw.aq.fw_maj_ver < 4)))
- pf->hw_features |= I40E_HW_STOP_FW_LLDP;
-
- /* Use the FW Set LLDP MIB API if FW > v4.40 */
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
- (pf->hw.aq.fw_maj_ver >= 5)))
- pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
-
- /* Enable PTP L4 if FW > v6.0 */
- if (pf->hw.mac.type == I40E_MAC_XL710 &&
- pf->hw.aq.fw_maj_ver >= 6)
- pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
+ if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
+ set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags);
if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
- pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ set_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
- pf->flags |= I40E_FLAG_IWARP_ENABLED;
+ set_bit(I40E_FLAG_IWARP_ENA, pf->flags);
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
@@ -12856,25 +12804,23 @@ static int i40e_sw_init(struct i40e_pf *pf)
* if NPAR is functioning so unset this hw flag in this case.
*/
if (pf->hw.mac.type == I40E_MAC_XL710 &&
- pf->hw.func_caps.npar_enable &&
- (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
- pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ pf->hw.func_caps.npar_enable)
+ clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps);
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
- pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+ set_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
- pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
/* By default FW has this off for performance reasons */
- pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+ clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
/* set up queue assignment tracking */
size = sizeof(struct i40e_lump_tracking)
@@ -12893,8 +12839,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Link down on close must be on when total port shutdown
* is enabled for a given port
*/
- pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
+ set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
+ set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
dev_info(&pf->pdev->dev,
"total-port-shutdown was enabled, link-down-on-close is forced on\n");
}
@@ -12920,31 +12866,31 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
*/
if (features & NETIF_F_NTUPLE) {
/* Enable filters and mark for reset */
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
need_reset = true;
/* enable FD_SB only if there is MSI-X vector and no cloud
* filters exist
*/
if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
} else {
/* turn off filters, mark for reset and clear SW filter list */
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
need_reset = true;
i40e_fdir_filter_exit(pf);
}
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
/* reset fd counters */
pf->fd_add_err = 0;
pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
@@ -13093,7 +13039,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
+ if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps))
return -EOPNOTSUPP;
ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
@@ -13122,7 +13068,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct i40e_pf *pf = np->vsi->back;
int err = 0;
- if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags))
return -EOPNOTSUPP;
if (vid) {
@@ -13222,9 +13168,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
veb->bridge_mode = mode;
/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
if (mode == BRIDGE_MODE_VEB)
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
break;
}
@@ -13558,7 +13504,7 @@ static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
struct i40e_hw *hw = &pf->hw;
/* All rings in a qp belong to the same qvector. */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
else
i40e_irq_dynamic_enable_icr0(pf);
@@ -13583,7 +13529,7 @@ static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
*
* All rings in a qp belong to the same qvector.
*/
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
@@ -13614,9 +13560,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
return err;
i40e_queue_pair_disable_irq(vsi, queue_pair);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
- i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -13768,7 +13714,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_RXCSUM |
0;
- if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
@@ -13804,7 +13750,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
@@ -13995,7 +13941,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* negative logic - if it's set, we need to fiddle with
* the VSI to disable source pruning.
*/
- if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+ if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) {
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
@@ -14017,7 +13963,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
}
/* MFP mode setup queue map and update VSI */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
!(pf->hw.func_caps.iscsi)) { /* NIC type PF */
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->main_vsi_seid;
@@ -14065,7 +14011,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) &&
(i40e_is_vsi_uplink_mode_veb(vsi))) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
@@ -14113,7 +14059,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}
- if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |=
@@ -14329,7 +14275,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
/* In Legacy mode, we do not have to get any other vector since we
* piggyback on the misc/ICR0 for queue interrupts.
*/
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return ret;
if (vsi->num_q_vectors)
vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
@@ -14496,9 +14442,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
* already enabled, in which case we can't force VEPA
* mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
veb->bridge_mode = BRIDGE_MODE_VEPA;
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
}
i40e_config_bridge_mode(veb);
}
@@ -14594,12 +14540,16 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
break;
}
- if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
- (vsi->type == I40E_VSI_VMDQ2)) {
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
+ vsi->type == I40E_VSI_VMDQ2) {
ret = i40e_vsi_config_rss(vsi);
+ if (ret)
+ goto err_config;
}
return vsi;
+err_config:
+ i40e_vsi_clear_rings(vsi);
err_rings:
i40e_vsi_free_q_vectors(vsi);
err_msix:
@@ -14837,7 +14787,7 @@ void i40e_veb_release(struct i40e_veb *veb)
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
struct i40e_pf *pf = veb->pf;
- bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
+ bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
int ret;
ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
@@ -15026,12 +14976,11 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
* the PF's VSI
*/
pf->mac_seid = uplink_seid;
- pf->pf_seid = downlink_seid;
pf->main_vsi_seid = seid;
if (printconfig)
dev_info(&pf->pdev->dev,
"pf_seid=%d main_vsi_seid=%d\n",
- pf->pf_seid, pf->main_vsi_seid);
+ downlink_seid, pf->main_vsi_seid);
break;
case I40E_SWITCH_ELEMENT_TYPE_PF:
case I40E_SWITCH_ELEMENT_TYPE_VF:
@@ -15137,7 +15086,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
*/
if ((pf->hw.pf_id == 0) &&
- !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) {
flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
pf->last_sw_conf_flags = flags;
}
@@ -15204,16 +15153,12 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
/* enable RSS in the HW, even for only one queue, as the stack can use
* the hash
*/
- if ((pf->flags & I40E_FLAG_RSS_ENABLED))
+ if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i40e_pf_config_rss(pf);
/* fill in link information and enable LSE reporting */
i40e_link_event(pf);
- /* Initialize user-specific link properties */
- pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
- I40E_AQ_AN_COMPLETED) ? true : false);
-
i40e_ptp_init(pf);
if (!lock_acquired)
@@ -15246,42 +15191,42 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left = pf->hw.func_caps.num_tx_qp;
if ((queues_left == 1) ||
- !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->alloc_rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
- } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_CAPABLE))) {
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
+ } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) {
/* one qp for PF */
pf->alloc_rss_size = pf->num_lan_qps = 1;
queues_left -= pf->num_lan_qps;
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
} else {
/* Not enough queues for all TCs */
- if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
- (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED);
+ if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) &&
+ queues_left < I40E_MAX_TRAFFIC_CLASS) {
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
@@ -15294,24 +15239,24 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
}
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
if (queues_left > 1) {
queues_left -= 1; /* save 1 queue for FD */
} else {
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
}
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
pf->num_vf_qps && pf->num_req_vfs && queues_left) {
pf->num_req_vfs = min_t(int, pf->num_req_vfs,
(queues_left / pf->num_vf_qps));
queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
}
- if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) &&
pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
(queues_left / pf->num_vmdq_qps));
@@ -15322,7 +15267,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
pf->hw.func_caps.num_tx_qp,
- !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+ !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags),
pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
queues_left);
@@ -15346,7 +15291,8 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
/* Flow Director is enabled */
- if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ||
+ test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
settings->enable_fdir = true;
/* Ethtype and MACVLAN filters enabled for PF */
@@ -15378,21 +15324,21 @@ static void i40e_print_features(struct i40e_pf *pf)
i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis,
pf->vsi[pf->lan_vsi]->num_queue_pairs);
- if (pf->flags & I40E_FLAG_RSS_ENABLED)
+ if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " RSS");
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
}
- if (pf->flags & I40E_FLAG_DCB_CAPABLE)
+ if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " DCB");
i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
i += scnprintf(&buf[i], REMAIN(i), " Geneve");
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " PTP");
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " VEB");
else
i += scnprintf(&buf[i], REMAIN(i), " VEPA");
@@ -15423,22 +15369,26 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
* @fec_cfg: FEC option to set in flags
* @flags: ptr to flags in which we set FEC option
**/
-void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
+void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags)
{
- if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
- *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO) {
+ set_bit(I40E_FLAG_RS_FEC, flags);
+ set_bit(I40E_FLAG_BASE_R_FEC, flags);
+ }
if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
(fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
- *flags |= I40E_FLAG_RS_FEC;
- *flags &= ~I40E_FLAG_BASE_R_FEC;
+ set_bit(I40E_FLAG_RS_FEC, flags);
+ clear_bit(I40E_FLAG_BASE_R_FEC, flags);
}
if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
(fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
- *flags |= I40E_FLAG_BASE_R_FEC;
- *flags &= ~I40E_FLAG_RS_FEC;
+ set_bit(I40E_FLAG_BASE_R_FEC, flags);
+ clear_bit(I40E_FLAG_RS_FEC, flags);
+ }
+ if (fec_cfg == 0) {
+ clear_bit(I40E_FLAG_RS_FEC, flags);
+ clear_bit(I40E_FLAG_BASE_R_FEC, flags);
}
- if (fec_cfg == 0)
- *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
/**
@@ -15682,7 +15632,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_I40E_DCB */
struct i40e_pf *pf;
struct i40e_hw *hw;
- static u16 pfs_found;
u16 wol_nvm_bits;
char nvm_ver[32];
u16 link_status;
@@ -15760,7 +15709,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
hw->bus.bus_id = pdev->bus->number;
- pf->instance = pfs_found;
/* Select something other than the 802.1ad ethertype for the
* switch to use internally and drop on ingress.
@@ -15822,7 +15770,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
- pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
snprintf(pf->int_name, sizeof(pf->int_name) - 1,
"%s-%s:misc",
@@ -15864,15 +15811,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->vendor_id, hw->device_id, hw->subsystem_vendor_id,
hw->subsystem_device_id);
- if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
+ if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw) + 1))
dev_dbg(&pdev->dev,
"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
hw->aq.api_maj_ver,
hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR,
I40E_FW_MINOR_VERSION(hw));
- else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
+ else if (i40e_is_aq_api_ver_lt(hw, 1, 4))
dev_info(&pdev->dev,
"The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
hw->aq.api_maj_ver,
@@ -15919,7 +15866,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* Ignore error return codes because if it was already disabled via
* hardware settings this will fail
*/
- if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
+ if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) {
dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
i40e_aq_stop_lldp(hw, true, false, NULL);
}
@@ -15936,7 +15883,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
if (is_valid_ether_addr(hw->mac.port_addr))
- pf->hw_features |= I40E_HW_PORT_ID_VALID;
+ set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps);
i40e_ptp_alloc_pins(pf);
pci_set_drvdata(pdev, pf);
@@ -15946,10 +15893,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
(!status &&
lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
- (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
- (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
+ (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) :
+ (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags));
dev_info(&pdev->dev,
- (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
+ test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
@@ -15959,7 +15906,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -16027,11 +15975,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_PCI_IOV
/* prep for VF support */
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
!test_bit(__I40E_BAD_EEPROM, pf->state)) {
if (pci_num_vf(pdev))
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
}
#endif
err = i40e_setup_pf_switch(pf, false, false);
@@ -16072,7 +16020,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
wr32(hw, I40E_REG_MSS, val);
}
- if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+ if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
@@ -16092,7 +16040,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* the misc functionality and queue processing is combined in
* the same vector and that gets setup at open.
*/
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
err = i40e_setup_misc_vector(pf);
if (err) {
dev_info(&pdev->dev,
@@ -16105,8 +16053,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_PCI_IOV
/* prep for VF support */
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
!test_bit(__I40E_BAD_EEPROM, pf->state)) {
/* disable link interrupts for VFs */
val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
@@ -16126,7 +16074,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif /* CONFIG_PCI_IOV */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
pf->num_iwarp_msix,
I40E_IWARP_IRQ_PILE_ID);
@@ -16134,7 +16082,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"failed to get tracking for %d vectors for IWARP err=%d\n",
pf->num_iwarp_msix, pf->iwarp_base_vector);
- pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
}
}
@@ -16148,7 +16096,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
round_jiffies(jiffies + pf->service_timer_period));
/* add this PF to client device list and launch a client service task */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
err = i40e_lan_add_device(pf);
if (err)
dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
@@ -16161,7 +16109,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* and will report PCI Gen 1 x 1 by default so don't bother
* checking them.
*/
- if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
+ if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) {
char speed[PCI_SPEED_SIZE] = "Unknown";
char width[PCI_WIDTH_SIZE] = "Unknown";
@@ -16215,7 +16163,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* set the FEC config due to the board capabilities */
- i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags);
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
@@ -16226,8 +16174,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
- val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
- I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
+ rd32(&pf->hw, I40E_PRTGL_SAH));
if (val < MAX_FRAME_SIZE_DEFAULT)
dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
pf->hw.port, val);
@@ -16242,10 +16190,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->main_vsi_seid);
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
- (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
- pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
+ (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+ set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps);
if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
- pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
+ set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps);
/* print a string summarizing features */
i40e_print_features(pf);
@@ -16314,10 +16262,10 @@ static void i40e_remove(struct pci_dev *pdev)
usleep_range(1000, 2000);
set_bit(__I40E_IN_REMOVE, pf->state);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
}
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
@@ -16372,7 +16320,7 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_cloud_filter_exit(pf);
/* remove attached clients */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
ret_code = i40e_lan_del_device(pf);
if (ret_code)
dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
@@ -16391,7 +16339,7 @@ static void i40e_remove(struct pci_dev *pdev)
unmap:
/* Free MSI/legacy interrupt 0 when in recovery mode. */
if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
free_irq(pf->pdev->irq, pf);
/* shutdown the adminq */
@@ -16610,7 +16558,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
*/
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
- if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
i40e_enable_mc_magic_wake(pf);
i40e_prep_for_reset(pf);
@@ -16622,7 +16571,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
/* Free MSI/legacy interrupt 0 when in recovery mode. */
if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
free_irq(pf->pdev->irq, pf);
/* Since we're going to destroy queues during the
@@ -16663,7 +16612,8 @@ static int __maybe_unused i40e_suspend(struct device *dev)
*/
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
- if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
i40e_enable_mc_magic_wake(pf);
/* Since we're going to destroy queues during the
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 77cdbfc19d47..605fd82f5d20 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include "i40e_alloc.h"
#include "i40e_prototype.h"
@@ -26,8 +27,7 @@ int i40e_init_nvm(struct i40e_hw *hw)
* as the blank mode may be used in the factory line.
*/
gens = rd32(hw, I40E_GLNVM_GENS);
- sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
- I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ sr_size = FIELD_GET(I40E_GLNVM_GENS_SR_SIZE_MASK, gens);
/* Switching to words (sr_size contains power of 2KB) */
nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
@@ -193,9 +193,8 @@ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
- *data = (u16)((sr_reg &
- I40E_GLNVM_SRDATA_RDDATA_MASK)
- >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ *data = FIELD_GET(I40E_GLNVM_SRDATA_RDDATA_MASK,
+ sr_reg);
}
}
if (ret_code)
@@ -291,7 +290,7 @@ static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
static int __i40e_read_nvm_word(struct i40e_hw *hw,
u16 offset, u16 *data)
{
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps))
return i40e_read_nvm_word_aq(hw, offset, data);
return i40e_read_nvm_word_srctl(hw, offset, data);
@@ -310,14 +309,14 @@ int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
int ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps))
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
- if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps))
i40e_release_nvm(hw);
return ret_code;
@@ -499,7 +498,7 @@ static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
u16 offset, u16 *words,
u16 *data)
{
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps))
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
@@ -521,7 +520,7 @@ int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
int ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
@@ -771,13 +770,12 @@ static inline u8 i40e_nvmupd_get_module(u32 val)
}
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
- return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+ return FIELD_GET(I40E_NVM_TRANS_MASK, val);
}
static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
- return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
- I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+ return FIELD_GET(I40E_NVM_PRESERVATION_FLAGS_MASK, val);
}
static const char * const i40e_nvm_update_state_str[] = {
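The hunks above replace open-coded mask-and-shift extraction with FIELD_GET() from <linux/bitfield.h>. A minimal sketch of the equivalence, using a hypothetical DEMO_FIELD_MASK rather than a real i40e register field:

	#include <linux/types.h>
	#include <linux/bits.h>
	#include <linux/bitfield.h>

	#define DEMO_FIELD_MASK	GENMASK(7, 4)	/* hypothetical field, bits 7:4 */

	static u8 demo_get_field(u32 reg)
	{
		/* FIELD_GET() derives the shift from the mask at compile time,
		 * so this is equivalent to (reg & DEMO_FIELD_MASK) >> 4.
		 */
		return FIELD_GET(DEMO_FIELD_MASK, reg);
	}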
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 001162042050..ce1f11b8ad65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -501,4 +501,73 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+/* Firmware and AdminQ version check helpers */
+
+/**
+ * i40e_is_aq_api_ver_ge - check if AdminQ API version is at least maj.min
+ * @hw: pointer to i40e_hw structure
+ * @maj: API major value to compare
+ * @min: API minor value to compare
+ *
+ * Return true if the current HW API version is at least maj.min.
+ **/
+static inline bool i40e_is_aq_api_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.api_maj_ver > maj ||
+ (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min));
+}
+
+/**
+ * i40e_is_aq_api_ver_lt - check if AdminQ API version is lower than maj.min
+ * @hw: pointer to i40e_hw structure
+ * @maj: API major value to compare
+ * @min: API minor value to compare
+ *
+ * Return true if the current HW API version is lower than maj.min.
+ **/
+static inline bool i40e_is_aq_api_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return !i40e_is_aq_api_ver_ge(hw, maj, min);
+}
+
+/**
+ * i40e_is_fw_ver_ge - check if firmware version is at least maj.min
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Return true if the current firmware version is at least maj.min.
+ **/
+static inline bool i40e_is_fw_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.fw_maj_ver > maj ||
+ (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver >= min));
+}
+
+/**
+ * i40e_is_fw_ver_lt - check if firmware version is lower than maj.min
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Return true if the current firmware version is lower than maj.min.
+ **/
+static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return !i40e_is_fw_ver_ge(hw, maj, min);
+}
+
+/**
+ * i40e_is_fw_ver_eq - check if firmware version is exactly maj.min
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Return true if the current firmware version is exactly maj.min.
+ **/
+static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
+}
+
#endif /* _I40E_PROTOTYPE_H_ */
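A minimal sketch of how these helpers are intended to be called; the gated feature and the minimum versions below are hypothetical, only i40e_is_fw_ver_lt() and i40e_is_aq_api_ver_ge() come from the patch:

	/* Illustrative gate for an optional AdminQ feature; the minimum
	 * versions used here are placeholders, not taken from any datasheet.
	 */
	static bool demo_supports_new_aq_feature(struct i40e_hw *hw)
	{
		if (i40e_is_fw_ver_lt(hw, 4, 40))
			return false;

		return i40e_is_aq_api_ver_ge(hw, 1, 7);
	}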
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 20b77398f060..e7ebcb09f23c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -35,7 +35,7 @@ enum i40e_ptp_pin {
GPIO_4
};
-enum i40e_can_set_pins_t {
+enum i40e_can_set_pins {
CANT_DO_PINS = -1,
CAN_SET_PINS,
CAN_DO_PINS
@@ -193,7 +193,7 @@ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw)
* return CAN_DO_PINS if pins can be manipulated within a NIC or
* return CANT_DO_PINS otherwise.
**/
-static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf)
+static enum i40e_can_set_pins i40e_can_set_pins(struct i40e_pf *pf)
{
if (!i40e_is_ptp_pin_dev(&pf->hw)) {
dev_warn(&pf->pdev->dev,
@@ -680,7 +680,7 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf)
* configured. We don't want to spuriously warn about Rx timestamp
* hangs if we don't care about the timestamps.
*/
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx)
return;
spin_lock_bh(&pf->ptp_rx_lock);
@@ -733,7 +733,7 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf)
{
struct sk_buff *skb;
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx)
return;
/* Nothing to do if we're not already waiting for a timestamp */
@@ -771,7 +771,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
u32 hi, lo;
u64 ns;
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx)
return;
/* don't attempt to timestamp if we don't have an skb */
@@ -818,7 +818,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
/* Since we cannot turn off the Rx timestamp logic if the device is
* doing Tx timestamping, check if Rx timestamping is configured.
*/
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx)
return;
hw = &pf->hw;
@@ -924,7 +924,7 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config = &pf->tstamp_config;
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
@@ -1071,7 +1071,7 @@ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
static int i40e_ptp_set_pins(struct i40e_pf *pf,
struct i40e_ptp_pins_settings *pins)
{
- enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf);
+ enum i40e_can_set_pins pin_caps = i40e_can_set_pins(pf);
int i = 0;
if (pin_caps == CANT_DO_PINS)
@@ -1211,7 +1211,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
return -ERANGE;
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
@@ -1225,7 +1225,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
return -ERANGE;
fallthrough;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1234,7 +1234,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
I40E_PRTTSYN_CTL1_TSYNTYPE_V2;
- if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) {
tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
} else {
@@ -1308,7 +1308,7 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -1426,7 +1426,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
void i40e_ptp_save_hw_time(struct i40e_pf *pf)
{
/* don't try to access the PTP clock if it's not enabled */
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return;
i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL);
@@ -1480,10 +1480,10 @@ void i40e_ptp_init(struct i40e_pf *pf)
/* Only one PF is assigned to control 1588 logic per port. Do not
* enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID
*/
- pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
- I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ pf_id = FIELD_GET(I40E_PRTTSYN_CTL0_PF_ID_MASK,
+ rd32(hw, I40E_PRTTSYN_CTL0));
if (hw->pf_id != pf_id) {
- pf->flags &= ~I40E_FLAG_PTP;
+ clear_bit(I40E_FLAG_PTP_ENA, pf->flags);
dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
__func__,
netdev->name);
@@ -1504,7 +1504,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
if (pf->hw.debug_mask & I40E_DEBUG_LAN)
dev_info(&pf->pdev->dev, "PHC enabled\n");
- pf->flags |= I40E_FLAG_PTP;
+ set_bit(I40E_FLAG_PTP_ENA, pf->flags);
/* Ensure the clocks are running. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
@@ -1539,7 +1539,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
u32 regval;
- pf->flags &= ~I40E_FLAG_PTP;
+ clear_bit(I40E_FLAG_PTP_ENA, pf->flags);
pf->ptp_tx = false;
pf->ptp_rx = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index f6671ac79735..14ab642cafdb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -863,16 +863,6 @@
#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
#define I40E_PFPM_WUFC_MAG_SHIFT 1
#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VFQF_HLUT_MAX_INDEX 15
@@ -899,6 +889,7 @@
#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
/* Redefined for X722 family */
#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index dd410b15000f..0d7177083708 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -33,19 +33,16 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
- (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+ flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index);
- flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
- (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_FLEXOFF_MASK,
+ fdata->flex_off);
- flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
- (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
/* Use LAN VSI Id if not programmed by user */
- flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
- ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK,
+ fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id);
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
@@ -55,17 +52,15 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
- (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl);
- dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
- (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_FD_STATUS_MASK,
+ fdata->fd_status);
if (fdata->cnt_index) {
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
- ((u32)fdata->cnt_index <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ fdata->cnt_index);
}
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -464,7 +459,7 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
&pf->fd_tcp6_filter_cnt);
if (add) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
@@ -691,8 +686,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
u32 error;
qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
- error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
- I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+ error = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK, qword1);
if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
@@ -734,7 +728,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
!test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
pf->state))
if (I40E_DEBUG_FD & pf->hw.debug_mask)
@@ -1071,7 +1065,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
if (q_vector->arm_wb_state)
return;
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
@@ -1095,7 +1089,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
**/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
@@ -1403,8 +1397,7 @@ void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
{
u8 id;
- id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
- I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+ id = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK, qword1);
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
@@ -1555,7 +1548,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int err;
u64_stats_init(&rx_ring->syncp);
@@ -1576,14 +1568,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
- /* XDP RX-queue info only needed for RX rings exposed to XDP */
- if (rx_ring->vsi->type == I40E_VSI_MAIN) {
- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
- if (err < 0)
- return err;
- }
-
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
rx_ring->rx_bi =
@@ -1764,11 +1748,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u64 qword;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
+ rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
@@ -1901,13 +1883,10 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ u32 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
- u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
+ u32 tsyn = FIELD_GET(I40E_RXD_QW1_STATUS_TSYNINDX_MASK, rx_status);
+ u8 rx_ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -2099,7 +2078,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
struct xdp_buff *xdp)
{
- u32 next = rx_ring->next_to_clean;
+ u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ u32 next = rx_ring->next_to_clean, i = 0;
struct i40e_rx_buffer *rx_buffer;
xdp->flags = 0;
@@ -2112,10 +2092,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
if (!rx_buffer->page)
continue;
- if (xdp_res == I40E_XDP_CONSUMED)
- rx_buffer->pagecnt_bias++;
- else
+ if (xdp_res != I40E_XDP_CONSUMED)
i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+ else if (i++ <= nr_frags)
+ rx_buffer->pagecnt_bias++;
/* EOP buffer will be put in i40e_clean_rx_irq() */
if (next == rx_ring->next_to_process)
@@ -2129,20 +2109,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp,
- u32 nr_frags)
+ struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
struct i40e_rx_buffer *rx_buffer;
+ struct skb_shared_info *sinfo;
unsigned int headlen;
struct sk_buff *skb;
+ u32 nr_frags = 0;
/* prefetch first cache line of first page */
net_prefetch(xdp->data);
@@ -2180,6 +2160,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* update all of the pointers */
size -= headlen;
@@ -2199,9 +2183,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
}
if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
- sinfo = xdp_get_shared_info_from_buff(xdp);
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
@@ -2224,17 +2207,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp,
- u32 nr_frags)
+ struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo;
struct sk_buff *skb;
+ u32 nr_frags;
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
@@ -2243,6 +2226,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
*/
net_prefetch(xdp->data_meta);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* build an skb around the page buffer */
skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
@@ -2255,9 +2243,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
xdp_update_skb_shared_info(skb, nr_frags,
sinfo->xdp_frags_size,
nr_frags * xdp->frame_sz,
@@ -2554,8 +2539,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
continue;
}
- size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
if (!size)
break;
@@ -2602,9 +2586,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
total_rx_bytes += size;
} else {
if (ring_uses_build_skb(rx_ring))
- skb = i40e_build_skb(rx_ring, xdp, nfrags);
+ skb = i40e_build_skb(rx_ring, xdp);
else
- skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+ skb = i40e_construct_skb(rx_ring, xdp);
/* drop if we failed to retrieve a buffer */
if (!skb) {
@@ -2699,7 +2683,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
u32 intval;
/* If we don't have MSIX, then we only need to re-enable icr0 */
- if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
+ if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
i40e_irq_dynamic_enable_icr0(vsi->back);
return;
}
@@ -2888,7 +2872,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i;
/* make sure ATR is enabled */
- if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
return;
if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
@@ -2933,7 +2917,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
return;
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
@@ -2959,8 +2943,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
+ flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
+ tx_ring->queue_index);
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
(I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
@@ -2986,16 +2970,14 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
dtype_cmd |=
- ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ I40E_FD_ATR_STAT_IDX(pf->hw.pf_id));
else
dtype_cmd |=
- ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id));
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags))
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -3053,7 +3035,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_SW_VLAN;
}
- if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
goto out;
/* Insert 802.1p priority into VLAN header */
@@ -3229,7 +3211,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
* we are not already transmitting a packet to be timestamped
*/
pf = i40e_netdev_to_pf(tx_ring->netdev);
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return 0;
if (pf->ptp_tx &&
@@ -3601,8 +3583,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
- I40E_TX_FLAGS_VLAN_SHIFT;
+ td_tag = FIELD_GET(I40E_TX_FLAGS_VLAN_MASK, tx_flags);
}
first->tx_flags = tx_flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 421fe5675584..abf15067eb5d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -58,7 +58,7 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl)
* mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
* register but instead is a special value meaning "don't update" ITR0/1/2.
*/
-enum i40e_dyn_idx_t {
+enum i40e_dyn_idx {
I40E_IDX_ITR0 = 0,
I40E_IDX_ITR1 = 1,
I40E_IDX_ITR2 = 2,
@@ -92,8 +92,8 @@ enum i40e_dyn_idx_t {
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
#define i40e_pf_get_default_rss_hena(pf) \
- (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
@@ -306,7 +306,7 @@ struct i40e_rx_queue_stats {
u64 page_busy_count;
};
-enum i40e_ring_state_t {
+enum i40e_ring_state {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
__I40E_RING_STATE_NBITS /* must be last */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index f95bc2a4a838..d9031499697e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -64,9 +64,7 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
I40E_MAC_XL710,
- I40E_MAC_VF,
I40E_MAC_X722,
- I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -272,9 +270,7 @@ struct i40e_mac_info {
enum i40e_mac_type type;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
- u8 san_addr[ETH_ALEN];
u8 port_addr[ETH_ALEN];
- u16 max_fcoeq;
};
enum i40e_aq_resources_ids {
@@ -482,6 +478,36 @@ struct i40e_dcbx_config {
struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
};
+enum i40e_hw_flags {
+ I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE,
+ I40E_HW_CAP_802_1AD,
+ I40E_HW_CAP_AQ_PHY_ACCESS,
+ I40E_HW_CAP_NVM_READ_REQUIRES_LOCK,
+ I40E_HW_CAP_FW_LLDP_STOPPABLE,
+ I40E_HW_CAP_FW_LLDP_PERSISTENT,
+ I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED,
+ I40E_HW_CAP_X722_FEC_REQUEST,
+ I40E_HW_CAP_RSS_AQ,
+ I40E_HW_CAP_128_QP_RSS,
+ I40E_HW_CAP_ATR_EVICT,
+ I40E_HW_CAP_WB_ON_ITR,
+ I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ I40E_HW_CAP_NO_PCI_LINK_CHECK,
+ I40E_HW_CAP_100M_SGMII,
+ I40E_HW_CAP_NO_DCB_SUPPORT,
+ I40E_HW_CAP_USE_SET_LLDP_MIB,
+ I40E_HW_CAP_GENEVE_OFFLOAD,
+ I40E_HW_CAP_PTP_L4,
+ I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE,
+ I40E_HW_CAP_CRT_RETIMER,
+ I40E_HW_CAP_OUTER_UDP_CSUM,
+ I40E_HW_CAP_PHY_CONTROLS_LEDS,
+ I40E_HW_CAP_STOP_FW_LLDP,
+ I40E_HW_CAP_PORT_ID_VALID,
+ I40E_HW_CAP_RESTART_AUTONEG,
+ I40E_HW_CAPS_NBITS,
+};
+
/* Port hardware description */
struct i40e_hw {
u8 __iomem *hw_addr;
@@ -546,16 +572,7 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
-#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
-#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
-#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
-#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
-#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
-#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
-#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
-#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
- u64 flags;
+ DECLARE_BITMAP(caps, I40E_HW_CAPS_NBITS);
/* Used in set switch config AQ command */
u16 switch_tag;
@@ -567,12 +584,6 @@ struct i40e_hw {
char err_str[16];
};
-static inline bool i40e_is_vf(struct i40e_hw *hw)
-{
- return (hw->mac.type == I40E_MAC_VF ||
- hw->mac.type == I40E_MAC_X722_VF);
-}
-
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
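With hw->flags replaced by a DECLARE_BITMAP() of I40E_HW_CAPS_NBITS, every 'flags & FLAG' test becomes test_bit() and every '|=' / '&= ~' update becomes set_bit() / clear_bit(). A minimal sketch of the pattern on a stand-in structure (the demo_* names are hypothetical, not part of the driver):

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	enum demo_caps { DEMO_CAP_FOO, DEMO_CAP_BAR, DEMO_CAPS_NBITS };

	struct demo_hw {
		DECLARE_BITMAP(caps, DEMO_CAPS_NBITS);
	};

	static bool demo_has_foo(struct demo_hw *hw)
	{
		set_bit(DEMO_CAP_FOO, hw->caps);	/* was: flags |= BIT_ULL(FOO)  */
		clear_bit(DEMO_CAP_BAR, hw->caps);	/* was: flags &= ~BIT_ULL(BAR) */

		return test_bit(DEMO_CAP_FOO, hw->caps); /* was: flags & BIT_ULL(FOO) */
	}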
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index de5ec4e6bedf..b34c71770887 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -500,10 +500,10 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
*/
reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
- next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
- >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
- next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
- >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
+ next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
+ reg);
+ next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
+ reg);
reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
reg = (next_q_index &
@@ -581,10 +581,10 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
* queue on top. Also link it with the new queue in CEQCTL.
*/
reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
- next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
- next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
- I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
+ reg);
+ next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
+ reg);
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
@@ -685,11 +685,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* associate this queue with the PCI VF function */
qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
- qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
- & I40E_QTX_CTL_PF_INDX_MASK);
- qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
- << I40E_QTX_CTL_VFVM_INDX_SHIFT)
- & I40E_QTX_CTL_VFVM_INDX_MASK);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ vf->vf_id + hw->func_caps.vf_base_id);
wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
i40e_flush(hw);
@@ -1834,7 +1832,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
pf->num_alloc_vfs = 0;
goto err_iov;
}
@@ -1945,8 +1943,8 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
}
if (num_vfs) {
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
@@ -1955,7 +1953,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
@@ -2163,14 +2161,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
- if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
else
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) {
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |=
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
@@ -2179,12 +2177,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
- if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
+ if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) &&
(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev,
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
vf->vf_id);
@@ -2194,7 +2192,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
}
- if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) {
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
vfres->vf_cap_flags |=
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
@@ -2607,6 +2605,14 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
int i;
+ if (vf->is_disabled_from_host) {
+ aq_ret = -EPERM;
+ dev_info(&pf->pdev->dev,
+ "Admin has disabled VF %d, will not enable queues\n",
+ vf->vf_id);
+ goto error_param;
+ }
+
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = -EINVAL;
goto error_param;
@@ -2842,6 +2848,24 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/**
+ * i40e_can_vf_change_mac - check if the VF may change its MAC filters
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise.
+ */
+static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted.
+ */
+ if (vf->pf_set_mac && !vf->trusted)
+ return false;
+
+ return true;
+}
+
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
@@ -2901,8 +2925,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
+ if (!i40e_can_vf_change_mac(vf) &&
+ !is_multicast_ether_addr(addr) &&
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
@@ -3108,19 +3132,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = -EINVAL;
goto error_param;
}
- if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
- was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
- for (i = 0; i < al->num_elements; i++)
+ for (i = 0; i < al->num_elements; i++) {
+ const u8 *addr = al->list[i].addr;
+
+ /* Allow deleting the VF primary MAC only if it was not set
+ * administratively by the PF, or if the VF is trusted.
+ */
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
+ i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ }
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -4701,9 +4735,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->max_tx_rate = vf->tx_rate;
ivi->min_tx_rate = 0;
- ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
- ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
- I40E_VLAN_PRIORITY_SHIFT;
+ ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
+ ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
if (vf->link_forced == false)
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->link_up == true)
@@ -4734,9 +4767,12 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
struct i40e_link_status *ls = &pf->hw.phy.link_info;
struct virtchnl_pf_event pfe;
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ unsigned long q_map;
struct i40e_vf *vf;
int abs_vf_id;
int ret = 0;
+ int tmp;
if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
@@ -4759,17 +4795,38 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
switch (link) {
case IFLA_VF_LINK_STATE_AUTO:
vf->link_forced = false;
+ vf->is_disabled_from_host = false;
+ /* reset needed to reinit VF resources */
+ i40e_vc_reset_vf(vf, true);
i40e_set_vf_link_state(vf, &pfe, ls);
break;
case IFLA_VF_LINK_STATE_ENABLE:
vf->link_forced = true;
vf->link_up = true;
+ vf->is_disabled_from_host = false;
+ /* reset needed to reinit VF resources */
+ i40e_vc_reset_vf(vf, true);
i40e_set_vf_link_state(vf, &pfe, ls);
break;
case IFLA_VF_LINK_STATE_DISABLE:
vf->link_forced = true;
vf->link_up = false;
i40e_set_vf_link_state(vf, &pfe, ls);
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ q_map = BIT(vsi->num_queue_pairs) - 1;
+
+ vf->is_disabled_from_host = true;
+
+ /* Stop both the Tx and Rx rings even if one of the calls
+ * fails, so that neither ring is left running on error.
+ * If either call returns an error, the first error that
+ * occurred is returned.
+ */
+ tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
+ ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
+
+ ret = tmp ? tmp : ret;
break;
default:
ret = -EINVAL;
@@ -4869,7 +4926,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
}
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
ret = -EINVAL;
goto out;
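The IFLA_VF_LINK_STATE_DISABLE branch above stops both VF ring directions and reports the first failure. A minimal sketch of that error-aggregation pattern, reusing i40e_ctrl_vf_tx_rings()/i40e_ctrl_vf_rx_rings() from the hunk (the demo_* wrapper itself is hypothetical):

	static int demo_stop_vf_rings(struct i40e_vsi *vsi, unsigned long q_map)
	{
		int tx_err, rx_err;

		/* Both calls always run so neither ring is left active. */
		tx_err = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
		rx_err = i40e_ctrl_vf_rx_rings(vsi, q_map, false);

		/* Report the first error that occurred, Tx taking precedence. */
		return tx_err ? tx_err : rx_err;
	}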
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 5fd607c0de0a..66f95e2f3146 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -100,6 +100,7 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
+ bool is_disabled_from_host; /* PF ctrl of VF enable/disable */
u16 num_vlan;
/* ADq related variables */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e99fa854d17f..11500003af0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start), 0, size);
+ virt_to_page(xdp->data_hard_start),
+ XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@@ -476,8 +477,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
continue;
}
- size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
if (!size)
break;
@@ -499,7 +499,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
&rx_bytes, xdp_res, &failure);
- first->flags = 0;
next_to_clean = next_to_process;
if (failure)
break;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 63b45c61cc4a..db8188c7ac4b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -313,7 +313,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
-#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
+#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
+#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(16)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
@@ -415,6 +416,7 @@ struct iavf_adapter {
struct iavf_vsi vsi;
u32 aq_wait_count;
/* RSS stuff */
+ enum virtchnl_rss_algorithm hfunc;
u64 hena;
u16 rss_key_size;
u16 rss_lut_size;
@@ -540,6 +542,7 @@ void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
+void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
void iavf_enable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9ffbd24d83cb..82fcd18ad660 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -8,27 +8,6 @@
#include "iavf_prototype.h"
/**
- * iavf_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void iavf_adminq_init_regs(struct iavf_hw *hw)
-{
- /* set head and tail registers in our local struct */
- hw->aq.asq.tail = IAVF_VF_ATQT1;
- hw->aq.asq.head = IAVF_VF_ATQH1;
- hw->aq.asq.len = IAVF_VF_ATQLEN1;
- hw->aq.asq.bal = IAVF_VF_ATQBAL1;
- hw->aq.asq.bah = IAVF_VF_ATQBAH1;
- hw->aq.arq.tail = IAVF_VF_ARQT1;
- hw->aq.arq.head = IAVF_VF_ARQH1;
- hw->aq.arq.len = IAVF_VF_ARQLEN1;
- hw->aq.arq.bal = IAVF_VF_ARQBAL1;
- hw->aq.arq.bah = IAVF_VF_ARQBAH1;
-}
-
-/**
* iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
@@ -259,17 +238,17 @@ static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, IAVF_VF_ATQH1, 0);
+ wr32(hw, IAVF_VF_ATQT1, 0);
/* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries |
IAVF_VF_ATQLEN1_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
+ reg = rd32(hw, IAVF_VF_ATQBAL1);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
@@ -288,20 +267,20 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, IAVF_VF_ARQH1, 0);
+ wr32(hw, IAVF_VF_ARQT1, 0);
/* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries |
IAVF_VF_ARQLEN1_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+ wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
+ reg = rd32(hw, IAVF_VF_ARQBAL1);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
@@ -455,11 +434,11 @@ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
+ wr32(hw, IAVF_VF_ATQH1, 0);
+ wr32(hw, IAVF_VF_ATQT1, 0);
+ wr32(hw, IAVF_VF_ATQLEN1, 0);
+ wr32(hw, IAVF_VF_ATQBAL1, 0);
+ wr32(hw, IAVF_VF_ATQBAH1, 0);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
@@ -489,11 +468,11 @@ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
+ wr32(hw, IAVF_VF_ARQH1, 0);
+ wr32(hw, IAVF_VF_ARQT1, 0);
+ wr32(hw, IAVF_VF_ARQLEN1, 0);
+ wr32(hw, IAVF_VF_ARQBAL1, 0);
+ wr32(hw, IAVF_VF_ARQBAH1, 0);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
@@ -529,9 +508,6 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
goto init_adminq_exit;
}
- /* Set up register offsets */
- iavf_adminq_init_regs(hw);
-
/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
@@ -587,9 +563,9 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
+ while (rd32(hw, IAVF_VF_ATQH1) != ntc) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1));
if (details->callback) {
IAVF_ADMINQ_CALLBACK cb_func =
@@ -624,7 +600,7 @@ bool iavf_asq_done(struct iavf_hw *hw)
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+ return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use;
}
/**
@@ -663,7 +639,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
hw->aq.asq_last_status = IAVF_AQ_RC_OK;
- val = rd32(hw, hw->aq.asq.head);
+ val = rd32(hw, IAVF_VF_ATQH1);
if (val >= hw->aq.num_asq_entries) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
@@ -755,7 +731,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+ wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
@@ -810,7 +786,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
@@ -878,7 +854,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
}
/* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
+ ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
@@ -926,7 +902,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
+ wr32(hw, IAVF_VF_ARQT1, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index 1f60518eb0e5..406506f64bdd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -29,13 +29,6 @@ struct iavf_adminq_ring {
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
};
/* ASQ transaction details */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
index 6edbf134b73f..a9e1da35e248 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
@@ -95,17 +95,21 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
* @rss_cfg: the virtchnl message to be filled with RSS configuration setting
* @packet_hdrs: the RSS configuration protocol header types
* @hash_flds: the RSS configuration protocol hash fields
+ * @symm: if true, symmetric hash is required
*
* Returns 0 if the RSS configuration virtchnl message is filled successfully
*/
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
- u32 packet_hdrs, u64 hash_flds)
+ u32 packet_hdrs, u64 hash_flds, bool symm)
{
struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
struct virtchnl_proto_hdr *hdr;
- rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ if (symm)
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ else
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
proto_hdrs->tunnel_level = 0; /* always outer layer */
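The new symm argument selects VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC when true. A minimal sketch of a caller, assuming the adapter's stored iavf_adv_rss entry carries the requested settings (the demo_* function is hypothetical):

	static int demo_fill_rss_cfg(struct iavf_adv_rss *rss)
	{
		/* rss->symm picks symmetric vs. asymmetric Toeplitz hashing */
		return iavf_fill_adv_rss_cfg_msg(&rss->cfg_msg,
						 rss->packet_hdrs,
						 rss->hash_flds,
						 rss->symm);
	}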
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
index 4d3be11af7aa..e31eb2afebea 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
@@ -80,13 +80,14 @@ struct iavf_adv_rss {
u32 packet_hdrs;
u64 hash_flds;
+ bool symm;
struct virtchnl_rss_cfg cfg_msg;
};
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
- u32 packet_hdrs, u64 hash_flds);
+ u32 packet_hdrs, u64 hash_flds, bool symm);
struct iavf_adv_rss *
iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs);
void
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 8091e6feca01..5a25233a89d5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/avf/virtchnl.h>
+#include <linux/bitfield.h>
#include "iavf_type.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"
-#include <linux/avf/virtchnl.h>
/**
* iavf_aq_str - convert AQ err code to a string
@@ -279,11 +280,11 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
**/
bool iavf_check_asq_alive(struct iavf_hw *hw)
{
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- IAVF_VF_ATQLEN1_ATQENABLE_MASK);
- else
+ /* Check if the queue is initialized */
+ if (!hw->aq.asq.count)
return false;
+
+ return !!(rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQENABLE_MASK);
}
/**
@@ -330,6 +331,7 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
struct iavf_aq_desc desc;
struct iavf_aqc_get_set_rss_lut *cmd_resp =
(struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
+ u16 flags;
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
@@ -342,22 +344,18 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
+ vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
if (pf_lut)
- cmd_resp->flags |= cpu_to_le16((u16)
- ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
else
- cmd_resp->flags |= cpu_to_le16((u16)
- ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);
+
+ cmd_resp->flags = cpu_to_le16(flags);
status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
@@ -411,11 +409,9 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
+ vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
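These conversions lean on the bitfield helpers from <linux/bitfield.h>. A small, self-contained illustration of the equivalence (the mask below is made up for the example and is not a driver define):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_FIELD_M	GENMASK(15, 6)	/* illustrative 10-bit field */

static u16 example_pack_and_unpack(u16 val)
{
	/* FIELD_PREP() derives the shift from the mask, so this replaces
	 * the open-coded ((val << 6) & EXAMPLE_FIELD_M) pattern.
	 */
	u16 packed = FIELD_PREP(EXAMPLE_FIELD_M, val);

	/* FIELD_GET() is the inverse: extract and right-align the field */
	return FIELD_GET(EXAMPLE_FIELD_M, packed);
}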
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index dc499fe7734e..378c3e9ddf9d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
+#include <linux/uaccess.h>
+
/* ethtool support for iavf */
#include "iavf.h"
-#include <linux/uaccess.h>
-
/* ethtool statistics helpers */
/**
@@ -396,8 +397,7 @@ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
unsigned int i;
for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&data, "%s",
- iavf_gstrings_priv_flags[i].flag_string);
+ ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string);
}
/**
@@ -1017,8 +1017,7 @@ iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
flex = &fltr->flex_words[cnt++];
flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
- flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
- IAVF_USERDEF_FLEX_OFFS_S;
+ flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value);
if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
return -EINVAL;
}
@@ -1436,16 +1435,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
- if (adapter->link_up) {
+
+ if (adapter->link_up)
fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
- } else {
+ else
fltr->state = IAVF_FDIR_FLTR_INACTIVE;
- }
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (adapter->link_up)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
ret:
if (err && fltr)
kfree(fltr);
@@ -1475,7 +1473,6 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fltr) {
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
list_del(&fltr->list);
kfree(fltr);
@@ -1490,7 +1487,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
return err;
}
@@ -1541,11 +1538,12 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
/**
* iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
* @cmd: ethtool rxnfc command
+ * @symm: true if Symmetric Toeplitz is set
*
* This function parses the rxnfc command and returns intended hash fields for
* RSS configuration
*/
-static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
+static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
@@ -1617,17 +1615,20 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
int count = 50, err = 0;
+ bool symm = false;
u64 hash_flds;
u32 hdrs;
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
+ symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);
+
hdrs = iavf_adv_rss_parse_hdrs(cmd);
if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
return -EINVAL;
- hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
+ hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
return -EINVAL;
@@ -1635,7 +1636,8 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!rss_new)
return -ENOMEM;
- if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
+ if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
+ symm)) {
kfree(rss_new);
return -EINVAL;
}
@@ -1654,12 +1656,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (rss_old) {
if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
err = -EBUSY;
- } else if (rss_old->hash_flds != hash_flds) {
+ } else if (rss_old->hash_flds != hash_flds ||
+ rss_old->symm != symm) {
rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_old->hash_flds = hash_flds;
+ rss_old->symm = symm;
memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
sizeof(rss_new->cfg_msg));
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
} else {
err = -EEXIST;
}
@@ -1668,13 +1671,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_new->packet_hdrs = hdrs;
rss_new->hash_flds = hash_flds;
+ rss_new->symm = symm;
list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
}
spin_unlock_bh(&adapter->adv_rss_lock);
if (!err)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
mutex_unlock(&adapter->crit_lock);
@@ -1908,27 +1911,27 @@ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
/**
* iavf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
-static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int iavf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (key)
- memcpy(key, adapter->rss_key, adapter->rss_key_size);
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
+
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size);
- if (indir)
+ if (rxfh->indir)
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
- indir[i] = (u32)adapter->rss_lut[i];
+ rxfh->indir[i] = (u32)adapter->rss_lut[i];
return 0;
}
@@ -1936,33 +1939,46 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
/**
* iavf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
-static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int iavf_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
/* Only support toeplitz hash function */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!key && !indir)
+ if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
+ adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+ adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ } else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
+ adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
+ adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ }
+
+ if (!rxfh->key && !rxfh->indir)
return 0;
- if (key)
- memcpy(adapter->rss_key, key, adapter->rss_key_size);
+ if (rxfh->key)
+ memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
+ adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
}
return iavf_config_rss(adapter);
@@ -1971,6 +1987,7 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .cap_rss_sym_xor_supported = true,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
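The ethtool op conversion above carries the RSS parameters in struct ethtool_rxfh_param, and the symmetric-hash request arrives via rxfh->input_xfrm. Pulled out of context as a sketch (field and flag names are the ones used in the hunk above; the helper itself is hypothetical):

static void iavf_example_update_hfunc(struct iavf_adapter *adapter,
				      const struct ethtool_rxfh_param *rxfh)
{
	bool want_symm = rxfh->input_xfrm & RXH_XFRM_SYM_XOR;
	u8 alg = want_symm ? VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC :
			     VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;

	if (adapter->hfunc == alg)
		return;

	/* Latch the new algorithm and let the watchdog send the
	 * VIRTCHNL_OP_CONFIG_RSS_HFUNC request to the PF.
	 */
	adapter->hfunc = alg;
	adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
}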
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 03e774bd2a5b..2d47b0b4640e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -3,6 +3,7 @@
/* flow director ethtool support for iavf */
+#include <linux/bitfield.h>
#include "iavf.h"
#define GTPU_PORT 2152
@@ -357,7 +358,7 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
if (fltr->ip_mask.tclass == U8_MAX) {
iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
- iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
+ iph->flow_lbl[0] = FIELD_PREP(0xF0, fltr->ip_data.tclass);
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index e8d5b889addc..335fd13e86f7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1038,13 +1038,12 @@ static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
*/
new_f->is_primary = true;
new_f->add = true;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
ether_addr_copy(hw->mac.addr, new_mac);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* schedule the watchdog task to immediately process the request */
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
return 0;
}
@@ -1263,8 +1262,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
iavf_napi_enable_all(adapter);
- adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
}
/**
@@ -1420,8 +1418,7 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
}
- adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
}
/**
@@ -2150,6 +2147,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_set_rss_lut(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
+ iavf_set_rss_hfunc(adapter);
+ return 0;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
iavf_set_promiscuous(adapter);
@@ -2318,10 +2319,8 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
}
}
- if (aq_required) {
- adapter->aq_required |= aq_required;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
- }
+ if (aq_required)
+ iavf_schedule_aq_request(adapter, aq_required);
}
/**
@@ -3234,7 +3233,7 @@ static void iavf_adminq_task(struct work_struct *work)
goto freedom;
/* check for error indications */
- val = rd32(hw, hw->aq.arq.len);
+ val = rd32(hw, IAVF_VF_ARQLEN1);
if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
goto freedom;
oldval = val;
@@ -3251,9 +3250,9 @@ static void iavf_adminq_task(struct work_struct *work)
val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
}
if (oldval != val)
- wr32(hw, hw->aq.arq.len, val);
+ wr32(hw, IAVF_VF_ARQLEN1, val);
- val = rd32(hw, hw->aq.asq.len);
+ val = rd32(hw, IAVF_VF_ATQLEN1);
oldval = val;
if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
@@ -3268,7 +3267,7 @@ static void iavf_adminq_task(struct work_struct *work)
val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
}
if (oldval != val)
- wr32(hw, hw->aq.asq.len, val);
+ wr32(hw, IAVF_VF_ATQLEN1, val);
freedom:
kfree(event.msg_buf);
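Several hunks above collapse the recurring "set an aq_required flag, then kick the watchdog" pair into iavf_schedule_aq_request(). The helper is defined elsewhere in the driver and is not part of this diff; it is presumably equivalent to a sketch like:

static void iavf_schedule_aq_request_sketch(struct iavf_adapter *adapter,
					    u64 flags)
{
	adapter->aq_required |= flags;
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}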
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index d64c4997136b..b71484c87a84 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include "iavf.h"
@@ -988,11 +989,9 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi,
u64 qword;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
- rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
- IAVF_RXD_QW1_ERROR_SHIFT;
- rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
- IAVF_RXD_QW1_STATUS_SHIFT;
+ ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
+ rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
@@ -1533,8 +1532,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
break;
- size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = iavf_get_rx_buffer(rx_ring, size);
@@ -1581,8 +1579,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
- IAVF_RXD_QW1_PTYPE_SHIFT;
+ rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
/* populate checksum, VLAN, and protocol */
iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
@@ -2290,8 +2287,7 @@ static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
- IAVF_TX_FLAGS_VLAN_SHIFT;
+ td_tag = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
}
first->tx_flags = tx_flags;
@@ -2467,8 +2463,7 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
IAVF_TXD_CTX_QW1_CMD_SHIFT;
- cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
- IAVF_TX_FLAGS_VLAN_SHIFT;
+ cd_l2tag2 = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
}
/* obtain protocol of skb */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 2d9366be0ec5..22f2df7c460b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1142,6 +1142,34 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_rss_hfunc
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS Hash function
+ **/
+void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
+{
+ struct virtchnl_rss_hfunc *vrh;
+ int len = sizeof(*vrh);
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh)
+ return;
+ vrh->vsi_id = adapter->vsi.id;
+ vrh->rss_algorithm = adapter->hfunc;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
+ kfree(vrh);
+}
+
+/**
* iavf_enable_vlan_stripping
* @adapter: adapter structure
*
@@ -2190,6 +2218,19 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+
+ if (adapter->hfunc ==
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ adapter->hfunc =
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ else
+ adapter->hfunc =
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),

diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 0679907980f7..cddd82d4ca0f 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -34,7 +34,9 @@ ice-y := ice_main.o \
ice_lag.o \
ice_ethtool.o \
ice_repr.o \
- ice_tc_lib.o
+ ice_tc_lib.o \
+ ice_fwlog.o \
+ ice_debugfs.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
ice_virtchnl.o \
@@ -49,3 +51,4 @@ ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
ice-$(CONFIG_GNSS) += ice_gnss.o
+ice-$(CONFIG_ICE_HWMON) += ice_hwmon.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 351e0d36df44..367b613d92c0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -360,6 +360,7 @@ struct ice_vsi {
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
+ u8 rss_hfunc; /* User configured hash type */
u8 *rss_hkey_user; /* User configured hash keys */
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
@@ -517,16 +518,22 @@ enum ice_pf_flags {
};
enum ice_misc_thread_tasks {
- ICE_MISC_THREAD_EXTTS_EVENT,
ICE_MISC_THREAD_TX_TSTAMP,
ICE_MISC_THREAD_NBITS /* must be last */
};
-struct ice_switchdev_info {
+struct ice_eswitch {
struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
struct ice_esw_br_offloads *br_offloads;
+ struct xarray reprs;
bool is_running;
+ /* struct to allow cp queues management optimization */
+ struct {
+ int to_reach;
+ int value;
+ bool is_reaching;
+ } qs;
};
struct ice_agg_node {
@@ -563,6 +570,10 @@ struct ice_pf {
struct ice_vsi_stats **vsi_stats;
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
+ struct dentry *ice_debugfs_pf;
+ struct dentry *ice_debugfs_pf_fwlog;
+ /* keep track of all the dentries for FW log modules */
+ struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -597,6 +608,7 @@ struct ice_pf {
u32 hw_csum_rx_error;
u32 oicr_err_reg;
struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
+ struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
u16 num_lan_msix; /* Total MSIX vectors for base driver */
@@ -621,6 +633,7 @@ struct ice_pf {
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
+ char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
@@ -637,7 +650,7 @@ struct ice_pf {
struct ice_link_default_override_tlv link_dflt_override;
struct ice_lag *lag; /* Link Aggregation information */
- struct ice_switchdev_info switchdev;
+ struct ice_eswitch eswitch;
struct ice_esw_br_port *br_port;
#define ICE_INVALID_AGG_NODE_ID 0
@@ -648,6 +661,7 @@ struct ice_pf {
#define ICE_MAX_VF_AGG_NODES 32
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
+ struct device *hwmon_dev;
};
extern struct workqueue_struct *ice_lag_wq;
@@ -846,7 +860,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
*/
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
- return pf->switchdev.is_running;
+ return pf->eswitch.is_running;
}
#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
@@ -881,6 +895,11 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
+void ice_debugfs_fwlog_init(struct ice_pf *pf);
+void ice_debugfs_init(void);
+void ice_debugfs_exit(void);
+void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
+
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
@@ -912,6 +931,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
@@ -989,4 +1009,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
+
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index fbd5d92182d3..8040317c9561 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -117,6 +117,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
#define ICE_AQC_CAPS_RDMA 0x0051
+#define ICE_AQC_CAPS_SENSOR_READING 0x0067
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
@@ -421,10 +422,10 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2)
#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3
#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
-#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH 0x0U
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP 0x1U
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR 0x2U
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING 0x3U
u8 inner_vlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
@@ -490,11 +491,11 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M GENMASK(7, 6)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ 0x0U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ 0x1U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR 0x2U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_JHASH 0x3U
u8 q_opt_tc;
#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
@@ -1414,6 +1415,30 @@ struct ice_aqc_get_phy_rec_clk_out {
__le16 node_handle;
};
+/* Get sensor reading (direct 0x0632) */
+struct ice_aqc_get_sensor_reading {
+ u8 sensor;
+ u8 format;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get sensor reading response (direct 0x0632) */
+struct ice_aqc_get_sensor_reading_resp {
+ union {
+ u8 raw[8];
+ /* Output data for sensor 0x00, format 0x00 */
+ struct __packed {
+ s8 temp;
+ u8 temp_warning_threshold;
+ u8 temp_critical_threshold;
+ u8 temp_fatal_threshold;
+ u8 reserved[4];
+ } s0f0;
+ } data;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -2070,78 +2095,6 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
-/* Configure Firmware Logging Command (indirect 0xFF09)
- * Logging Information Read Response (indirect 0xFF10)
- * Note: The 0xFF10 command has no input parameters.
- */
-struct ice_aqc_fw_logging {
- u8 log_ctrl;
-#define ICE_AQC_FW_LOG_AQ_EN BIT(0)
-#define ICE_AQC_FW_LOG_UART_EN BIT(1)
- u8 rsvd0;
- u8 log_ctrl_valid; /* Not used by 0xFF10 Response */
-#define ICE_AQC_FW_LOG_AQ_VALID BIT(0)
-#define ICE_AQC_FW_LOG_UART_VALID BIT(1)
- u8 rsvd1[5];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_NETPROXY,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
-/* Defines for both above FW logging command/response buffers */
-#define ICE_AQC_FW_LOG_ID_S 0
-#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
-
-#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */
-#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */
-
-#define ICE_AQC_FW_LOG_EN_S 12
-#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S)
-#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */
-#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
-#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
-#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
-
-/* Get/Clear FW Log (indirect 0xFF11) */
-struct ice_aqc_get_clear_fw_log {
- u8 flags;
-#define ICE_AQC_FW_LOG_CLEAR BIT(0)
-#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1)
- u8 rsvd1[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2404,6 +2357,84 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
+enum ice_aqc_fw_logging_mod {
+ ICE_AQC_FW_LOG_ID_GENERAL = 0,
+ ICE_AQC_FW_LOG_ID_CTRL,
+ ICE_AQC_FW_LOG_ID_LINK,
+ ICE_AQC_FW_LOG_ID_LINK_TOPO,
+ ICE_AQC_FW_LOG_ID_DNL,
+ ICE_AQC_FW_LOG_ID_I2C,
+ ICE_AQC_FW_LOG_ID_SDP,
+ ICE_AQC_FW_LOG_ID_MDIO,
+ ICE_AQC_FW_LOG_ID_ADMINQ,
+ ICE_AQC_FW_LOG_ID_HDMA,
+ ICE_AQC_FW_LOG_ID_LLDP,
+ ICE_AQC_FW_LOG_ID_DCBX,
+ ICE_AQC_FW_LOG_ID_DCB,
+ ICE_AQC_FW_LOG_ID_XLR,
+ ICE_AQC_FW_LOG_ID_NVM,
+ ICE_AQC_FW_LOG_ID_AUTH,
+ ICE_AQC_FW_LOG_ID_VPD,
+ ICE_AQC_FW_LOG_ID_IOSF,
+ ICE_AQC_FW_LOG_ID_PARSER,
+ ICE_AQC_FW_LOG_ID_SW,
+ ICE_AQC_FW_LOG_ID_SCHEDULER,
+ ICE_AQC_FW_LOG_ID_TXQ,
+ ICE_AQC_FW_LOG_ID_RSVD,
+ ICE_AQC_FW_LOG_ID_POST,
+ ICE_AQC_FW_LOG_ID_WATCHDOG,
+ ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ ICE_AQC_FW_LOG_ID_MNG,
+ ICE_AQC_FW_LOG_ID_SYNCE,
+ ICE_AQC_FW_LOG_ID_HEALTH,
+ ICE_AQC_FW_LOG_ID_TSDRV,
+ ICE_AQC_FW_LOG_ID_PFREG,
+ ICE_AQC_FW_LOG_ID_MDLVER,
+ ICE_AQC_FW_LOG_ID_MAX,
+};
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ */
+struct ice_aqc_fw_log {
+ u8 cmd_flags;
+#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
+#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
+#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
+#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
+#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
+#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
+
+ u8 rsp_flag;
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
+#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
+
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ice_aqc_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -2444,6 +2475,8 @@ struct ice_aq_desc {
struct ice_aqc_restart_an restart_an;
struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
+ struct ice_aqc_get_sensor_reading get_sensor_reading;
+ struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
@@ -2481,8 +2514,6 @@ struct ice_aq_desc {
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
- struct ice_aqc_fw_logging fw_logging;
- struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_cgu_input_config set_cgu_input_config;
struct ice_aqc_get_cgu_input_config get_cgu_input_config;
@@ -2494,6 +2525,7 @@ struct ice_aq_desc {
struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
struct ice_aqc_get_cgu_info get_cgu_info;
struct ice_aqc_driver_shared_params drv_shared_params;
+ struct ice_aqc_fw_log fw_log;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_mac_cfg set_mac_cfg;
@@ -2619,6 +2651,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
+ ice_aqc_opc_get_sensor_reading = 0x0632,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
@@ -2691,9 +2724,11 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
- /* debug commands */
- ice_aqc_opc_fw_logging = 0xFF09,
- ice_aqc_opc_fw_logging_info = 0xFF10,
+ /* FW Logging Commands */
+ ice_aqc_opc_fw_logs_config = 0xFF30,
+ ice_aqc_opc_fw_logs_register = 0xFF31,
+ ice_aqc_opc_fw_logs_query = 0xFF32,
+ ice_aqc_opc_fw_logs_event = 0xFF33,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
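The new FW logging opcodes (0xFF30-0xFF33) exchange per-module configuration as an array of struct ice_aqc_fw_log_cfg_resp entries, with the entry count reported in the descriptor's ops.cfg.mdl_cnt field. A sketch of walking such a buffer (illustrative helper only, not the driver's implementation in ice_fwlog.c):

static void ice_example_dump_fwlog_cfg(struct ice_hw *hw,
				       const struct ice_aqc_fw_log_cfg_resp *ent,
				       u16 mdl_cnt)
{
	u16 i;

	for (i = 0; i < mdl_cnt; i++)
		ice_debug(hw, ICE_DBG_FW_LOG, "module 0x%04x: log level %u\n",
			  le16_to_cpu(ent[i].module_identifier),
			  ent[i].log_level);
}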
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 7fa43827a3f0..c979192e44d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -189,10 +189,16 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
tx_ring->q_vector = NULL;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ }
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
rx_ring->q_vector = NULL;
+ }
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -224,24 +230,16 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
/* no need to update global register if ITR gran is already set */
if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
- (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
- GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
- GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
- GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
- GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US))
return;
- regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
- GLINT_CTL_ITR_GRAN_200_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
- GLINT_CTL_ITR_GRAN_100_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
- GLINT_CTL_ITR_GRAN_50_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
- GLINT_CTL_ITR_GRAN_25_M);
+ regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US);
wr32(hw, GLINT_CTL, regval);
}
@@ -278,7 +276,7 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
*/
static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
- struct ice_vsi *vsi = ring->vsi;
+ const struct ice_vsi *vsi = ring->vsi;
int i;
ice_for_each_txq(vsi, i) {
@@ -519,6 +517,19 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
return 0;
}
+static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
+{
+ void *ctx_ptr = &ring->pkt_ctx;
+ struct xsk_cb_desc desc = {};
+
+ XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
+ desc.src = &ctx_ptr;
+ desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
+ sizeof(struct xdp_buff);
+ desc.bytes = sizeof(ctx_ptr);
+ xsk_pool_fill_cb(ring->xsk_pool, &desc);
+}
+
/**
* ice_vsi_cfg_rxq - Configure an Rx queue
* @ring: the ring being configured
@@ -534,36 +545,46 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
if (ring->vsi->type == ICE_VSI_PF) {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index,
- ring->q_vector->napi.napi_id,
- ring->vsi->rx_buf_len);
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
if (err)
return err;
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ ice_xsk_pool_fill_cb(ring);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- __xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index,
- ring->q_vector->napi.napi_id,
- ring->vsi->rx_buf_len);
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
@@ -575,6 +596,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
ring->xdp.data = NULL;
+ ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
@@ -913,10 +935,10 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
struct ice_hw *hw = &pf->hw;
u32 val;
- itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+ itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);
val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+ FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
if (ice_is_xdp_ena_vsi(vsi)) {
@@ -945,10 +967,10 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
struct ice_hw *hw = &pf->hw;
u32 val;
- itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+ itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx);
val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+ FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
@@ -960,7 +982,7 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
* @hw: pointer to the HW structure
* @q_vector: interrupt vector to trigger the software interrupt for
*/
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector)
{
wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
(ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
@@ -1035,7 +1057,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* are needed for stopping Tx queue
*/
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_channel *ch = ring->ch;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b67dca417acb..17321ba75602 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -22,12 +22,12 @@ void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector);
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index edac34c796ce..10c32cd80fff 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -934,216 +934,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
}
/**
- * ice_get_fw_log_cfg - get FW logging configuration
- * @hw: pointer to the HW struct
- */
-static int ice_get_fw_log_cfg(struct ice_hw *hw)
-{
- struct ice_aq_desc desc;
- __le16 *config;
- int status;
- u16 size;
-
- size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
- config = kzalloc(size, GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
-
- status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
- if (!status) {
- u16 i;
-
- /* Save FW logging information into the HW structure */
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- u16 v, m, flgs;
-
- v = le16_to_cpu(config[i]);
- m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
- flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
-
- if (m < ICE_AQC_FW_LOG_ID_MAX)
- hw->fw_log.evnts[m].cur = flgs;
- }
- }
-
- kfree(config);
-
- return status;
-}
-
-/**
- * ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the HW struct
- * @enable: enable certain FW logging events if true, disable all if false
- *
- * This function enables/disables the FW logging via Rx CQ events and a UART
- * port based on predetermined configurations. FW logging via the Rx CQ can be
- * enabled/disabled for individual PF's. However, FW logging via the UART can
- * only be enabled/disabled for all PFs on the same device.
- *
- * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
- * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
- * before initializing the device.
- *
- * When re/configuring FW logging, callers need to update the "cfg" elements of
- * the hw->fw_log.evnts array with the desired logging event configurations for
- * modules of interest. When disabling FW logging completely, the callers can
- * just pass false in the "enable" parameter. On completion, the function will
- * update the "cur" element of the hw->fw_log.evnts array with the resulting
- * logging event configurations of the modules that are being re/configured. FW
- * logging modules that are not part of a reconfiguration operation retain their
- * previous states.
- *
- * Before resetting the device, it is recommended that the driver disables FW
- * logging before shutting down the control queue. When disabling FW logging
- * ("enable" = false), the latest configurations of FW logging events stored in
- * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
- * a device reset.
- *
- * When enabling FW logging to emit log messages via the Rx CQ during the
- * device's initialization phase, a mechanism alternative to interrupt handlers
- * needs to be used to extract FW log messages from the Rx CQ periodically and
- * to prevent the Rx CQ from being full and stalling other types of control
- * messages from FW to SW. Interrupts are typically disabled during the device's
- * initialization phase.
- */
-static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
-{
- struct ice_aqc_fw_logging *cmd;
- u16 i, chgs = 0, len = 0;
- struct ice_aq_desc desc;
- __le16 *data = NULL;
- u8 actv_evnts = 0;
- void *buf = NULL;
- int status = 0;
-
- if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
- return 0;
-
- /* Disable FW logging only when the control queue is still responsive */
- if (!enable &&
- (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
- return 0;
-
- /* Get current FW log settings */
- status = ice_get_fw_log_cfg(hw);
- if (status)
- return status;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
- cmd = &desc.params.fw_logging;
-
- /* Indicate which controls are valid */
- if (hw->fw_log.cq_en)
- cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
-
- if (hw->fw_log.uart_en)
- cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
-
- if (enable) {
- /* Fill in an array of entries with FW logging modules and
- * logging events being reconfigured.
- */
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- u16 val;
-
- /* Keep track of enabled event types */
- actv_evnts |= hw->fw_log.evnts[i].cfg;
-
- if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
- continue;
-
- if (!data) {
- data = devm_kcalloc(ice_hw_to_dev(hw),
- ICE_AQC_FW_LOG_ID_MAX,
- sizeof(*data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- }
-
- val = i << ICE_AQC_FW_LOG_ID_S;
- val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
- data[chgs++] = cpu_to_le16(val);
- }
-
- /* Only enable FW logging if at least one module is specified.
- * If FW logging is currently enabled but all modules are not
- * enabled to emit log messages, disable FW logging altogether.
- */
- if (actv_evnts) {
- /* Leave if there is effectively no change */
- if (!chgs)
- goto out;
-
- if (hw->fw_log.cq_en)
- cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
-
- if (hw->fw_log.uart_en)
- cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
-
- buf = data;
- len = sizeof(*data) * chgs;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- }
- }
-
- status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
- if (!status) {
- /* Update the current configuration to reflect events enabled.
- * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
- * logging mode is enabled for the device. They do not reflect
- * actual modules being enabled to emit log messages. So, their
- * values remain unchanged even when all modules are disabled.
- */
- u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
-
- hw->fw_log.actv_evnts = actv_evnts;
- for (i = 0; i < cnt; i++) {
- u16 v, m;
-
- if (!enable) {
- /* When disabling all FW logging events as part
- * of device's de-initialization, the original
- * configurations are retained, and can be used
- * to reconfigure FW logging later if the device
- * is re-initialized.
- */
- hw->fw_log.evnts[i].cur = 0;
- continue;
- }
-
- v = le16_to_cpu(data[i]);
- m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
- hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
- }
- }
-
-out:
- devm_kfree(ice_hw_to_dev(hw), data);
-
- return status;
-}
-
-/**
- * ice_output_fw_log
- * @hw: pointer to the HW struct
- * @desc: pointer to the AQ message descriptor
- * @buf: pointer to the buffer accompanying the AQ message
- *
- * Formats a FW Log message and outputs it via the standard driver logs.
- */
-void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
-{
- ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
- ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
- le16_to_cpu(desc->datalen));
- ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
-}
-
-/**
* ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
*
@@ -1152,9 +942,8 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
- u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
- GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
- GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+ u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
+ rd32(hw, GL_PWR_MODE_CTL));
switch (max_agg_bw) {
case ICE_MAX_AGG_BW_200G:
@@ -1186,9 +975,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
- PF_FUNC_RID_FUNC_NUM_M) >>
- PF_FUNC_RID_FUNC_NUM_S;
+ hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));
status = ice_reset(hw, ICE_RESET_PFR);
if (status)
@@ -1200,10 +987,10 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- /* Enable FW logging. Not fatal if this fails. */
- status = ice_cfg_fw_log(hw, true);
+ status = ice_fwlog_init(hw);
if (status)
- ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
+ ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
+ status);
status = ice_clear_pf_cfg(hw);
if (status)
@@ -1354,8 +1141,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
- /* Attempt to disable FW logging before shutting down control queues */
- ice_cfg_fw_log(hw, false);
+ ice_fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -1374,8 +1160,8 @@ int ice_check_reset(struct ice_hw *hw)
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
- grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
+ grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
+ rd32(hw, GLGEN_RSTCTL)) + 10;
for (cnt = 0; cnt < grst_timeout; cnt++) {
mdelay(100);
@@ -2459,7 +2245,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+ info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
@@ -2660,11 +2446,12 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
- info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
+ info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number);
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+ info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2685,6 +2472,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_ena);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
info->ts_ll_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
+ info->ts_ll_int_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2711,6 +2500,26 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
+ * enabled sensors.
+ */
+static void
+ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ dev_p->supported_sensors = le32_to_cpu(cap->number);
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "dev caps: supported sensors (bitmap) = 0x%x\n",
+ dev_p->supported_sensors);
+}
+
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2755,9 +2564,12 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case ICE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_SENSOR_READING:
+ ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -4072,6 +3884,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
+ u16 i2c_bus_addr;
int status;
if (!data || (mem_addr & 0xff00))
@@ -4082,15 +3895,13 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
- cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
- ICE_AQC_SFF_I2CBUS_7BIT_M) |
- ((set_page <<
- ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
- ICE_AQC_SFF_SET_EEPROM_PAGE_M));
- cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
- cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
+ i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
+ FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
if (write)
- cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
+ i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
+ cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
+ cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
+ cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);
status = ice_aq_send_cmd(hw, &desc, data, length, cd);
return status;
@@ -4345,6 +4156,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
+ u16 vmvf_and_timeout;
u16 i, sz = 0;
int status;
@@ -4360,27 +4172,26 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
cmd->num_entries = num_qgrps;
- cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
- ICE_AQC_Q_DIS_TIMEOUT_M);
+ vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);
switch (rst_src) {
case ICE_VM_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
- cmd->vmvf_and_timeout |=
- cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
break;
case ICE_VF_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
/* In this case, FW expects vmvf_num to be absolute VF ID */
- cmd->vmvf_and_timeout |=
- cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
- ICE_AQC_Q_DIS_VMVF_NUM_M);
+ vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M;
break;
case ICE_NO_RESET:
default:
break;
}
+ cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);
+
/* flush pipe on time out */
cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
/* If no queue group info, we are in a reset flow. Issue the AQ */
@@ -4455,10 +4266,8 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
- cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) &
- ICE_AQC_Q_CFG_DST_PRT_M;
- cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) &
- ICE_AQC_Q_CFG_TIMEOUT_M;
+ cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
+ cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
cmd->blocked_cgds = 0;
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -5539,6 +5348,35 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
}
/**
+ * ice_aq_get_sensor_reading
+ * @hw: pointer to the HW struct
+ * @data: pointer to data to be read from the sensor
+ *
+ * Get sensor reading (0x0632)
+ */
+int ice_aq_get_sensor_reading(struct ice_hw *hw,
+ struct ice_aqc_get_sensor_reading_resp *data)
+{
+ struct ice_aqc_get_sensor_reading *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
+ cmd = &desc.params.get_sensor_reading;
+#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
+#define ICE_INTERNAL_TEMP_SENSOR 0
+ cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
+ cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ memcpy(data, &desc.params.get_sensor_reading_resp,
+ sizeof(*data));
+
+ return status;
+}
+
+/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct
*
@@ -5933,7 +5771,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
- ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
+ ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
ICE_LINK_OVERRIDE_PHY_CFG_S;
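
For readers unfamiliar with <linux/bitfield.h>, here is a minimal stand-alone sketch of the helpers the ice_common.c hunks above convert to. The mask and flag bit are invented for the example; only the helper names and semantics are taken from the kernel API.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_PAGE_M	GENMASK(15, 11)		/* invented mask: bits 15:11 */

static u16 example_pack_page_cpu(u16 page, bool write)
{
	/* FIELD_PREP() masks and shifts the value into position; extra flag
	 * bits can be OR'd into the host-order word before one final
	 * cpu_to_le16(), the pattern ice_aq_sff_eeprom() now follows
	 */
	u16 raw = FIELD_PREP(EXAMPLE_PAGE_M, page);

	if (write)
		raw |= BIT(0);			/* invented flag bit */
	return raw;
}

static __le16 example_pack_page_le(u16 page)
{
	/* le16_encode_bits() fuses FIELD_PREP() with the endian conversion;
	 * le16_get_bits()/FIELD_GET() are the matching decode helpers
	 */
	return le16_encode_bits(page, EXAMPLE_PAGE_M);
}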
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 31fdcac33986..3e933f75e948 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
+#include "ice.h"
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
@@ -199,7 +200,6 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
-void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
@@ -241,6 +241,8 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
u8 *flags, u16 *node_handle);
+int ice_aq_get_sensor_reading(struct ice_hw *hw,
+ struct ice_aqc_get_sensor_reading_resp *data);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
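
As context for the new ice_aq_get_sensor_reading() declaration above, a hedged sketch of a caller inside the driver; the response layout is deliberately not spelled out here, since it lives in ice_adminq_cmd.h and is not part of this hunk.

static void example_log_sensor_reading(struct ice_hw *hw)
{
	struct ice_aqc_get_sensor_reading_resp data = {};

	/* opcode 0x0632; the command targets the internal temperature
	 * sensor in format 0, as set up inside ice_aq_get_sensor_reading()
	 */
	if (ice_aq_get_sensor_reading(hw, &data))
		return;

	/* a consumer such as a hwmon provider would pick the temperature
	 * fields out of 'data' here, per the layout in ice_adminq_cmd.h
	 */
}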
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 396e555023aa..74418c445cc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -35,8 +35,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M;
- cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M;
+ cmd->type |= FIELD_PREP(ICE_AQ_LLDP_BRID_TYPE_M, bridge_type);
desc.datalen = cpu_to_le16(buf_size);
@@ -147,8 +146,7 @@ static u8 ice_get_dcbx_status(struct ice_hw *hw)
u32 reg;
reg = rd32(hw, PRTDCB_GENS);
- return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >>
- PRTDCB_GENS_DCBX_STATUS_S);
+ return FIELD_GET(PRTDCB_GENS_DCBX_STATUS_M, reg);
}
/**
@@ -174,11 +172,9 @@ ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
*/
for (i = 0; i < 4; i++) {
ets_cfg->prio_table[i * 2] =
- ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >>
- ICE_IEEE_ETS_PRIO_1_S);
+ FIELD_GET(ICE_IEEE_ETS_PRIO_1_M, buf[offset]);
ets_cfg->prio_table[i * 2 + 1] =
- ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >>
- ICE_IEEE_ETS_PRIO_0_S);
+ FIELD_GET(ICE_IEEE_ETS_PRIO_0_M, buf[offset]);
offset++;
}
@@ -222,11 +218,9 @@ ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv,
* |1bit | 1bit|3 bits|3bits|
*/
etscfg = &dcbcfg->etscfg;
- etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >>
- ICE_IEEE_ETS_WILLING_S);
- etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S);
- etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >>
- ICE_IEEE_ETS_MAXTC_S);
+ etscfg->willing = FIELD_GET(ICE_IEEE_ETS_WILLING_M, buf[0]);
+ etscfg->cbs = FIELD_GET(ICE_IEEE_ETS_CBS_M, buf[0]);
+ etscfg->maxtcs = FIELD_GET(ICE_IEEE_ETS_MAXTC_M, buf[0]);
/* Begin parsing at Priority Assignment Table (offset 1 in buf) */
ice_parse_ieee_ets_common_tlv(&buf[1], etscfg);
@@ -268,11 +262,9 @@ ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv,
* -----------------------------------------
* |1bit | 1bit|2 bits|4bits| 1 octet |
*/
- dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >>
- ICE_IEEE_PFC_WILLING_S);
- dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S);
- dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >>
- ICE_IEEE_PFC_CAP_S);
+ dcbcfg->pfc.willing = FIELD_GET(ICE_IEEE_PFC_WILLING_M, buf[0]);
+ dcbcfg->pfc.mbc = FIELD_GET(ICE_IEEE_PFC_MBC_M, buf[0]);
+ dcbcfg->pfc.pfccap = FIELD_GET(ICE_IEEE_PFC_CAP_M, buf[0]);
dcbcfg->pfc.pfcena = buf[1];
}
@@ -294,7 +286,7 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
u8 *buf;
typelen = ntohs(tlv->typelen);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
buf = tlv->tlvinfo;
/* Removing sizeof(ouisubtype) and reserved byte from len.
@@ -314,12 +306,10 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
* -----------------------------------------
*/
while (offset < len) {
- dcbcfg->app[i].priority = ((buf[offset] &
- ICE_IEEE_APP_PRIO_M) >>
- ICE_IEEE_APP_PRIO_S);
- dcbcfg->app[i].selector = ((buf[offset] &
- ICE_IEEE_APP_SEL_M) >>
- ICE_IEEE_APP_SEL_S);
+ dcbcfg->app[i].priority = FIELD_GET(ICE_IEEE_APP_PRIO_M,
+ buf[offset]);
+ dcbcfg->app[i].selector = FIELD_GET(ICE_IEEE_APP_SEL_M,
+ buf[offset]);
dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) |
buf[offset + 2];
/* Move to next app */
@@ -347,8 +337,7 @@ ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u8 subtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
- ICE_LLDP_TLV_SUBTYPE_S);
+ subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype);
switch (subtype) {
case ICE_IEEE_SUBTYPE_ETS_CFG:
ice_parse_ieee_etscfg_tlv(tlv, dcbcfg);
@@ -399,11 +388,9 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
*/
for (i = 0; i < 4; i++) {
etscfg->prio_table[i * 2] =
- ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >>
- ICE_CEE_PGID_PRIO_1_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_1_M, buf[offset]);
etscfg->prio_table[i * 2 + 1] =
- ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >>
- ICE_CEE_PGID_PRIO_0_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_0_M, buf[offset]);
offset++;
}
@@ -466,7 +453,7 @@ ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u8 i;
typelen = ntohs(tlv->hdr.typelen);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
dcbcfg->numapps = len / sizeof(*app);
if (!dcbcfg->numapps)
@@ -521,14 +508,13 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u32 ouisubtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
- ICE_LLDP_TLV_SUBTYPE_S);
+ subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype);
/* Return if not CEE DCBX */
if (subtype != ICE_CEE_DCBX_TYPE)
return;
typelen = ntohs(tlv->typelen);
- tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ tlvlen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
len = sizeof(tlv->typelen) + sizeof(ouisubtype) +
sizeof(struct ice_cee_ctrl_tlv);
/* Return if no CEE DCBX Feature TLVs */
@@ -540,9 +526,8 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u16 sublen;
typelen = ntohs(sub_tlv->hdr.typelen);
- sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
- subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >>
- ICE_LLDP_TLV_TYPE_S);
+ sublen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
+ subtype = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen);
switch (subtype) {
case ICE_CEE_SUBTYPE_PG_CFG:
ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
@@ -579,7 +564,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u32 oui;
ouisubtype = ntohl(tlv->ouisubtype);
- oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S);
+ oui = FIELD_GET(ICE_LLDP_TLV_OUI_M, ouisubtype);
switch (oui) {
case ICE_IEEE_8021QAZ_OUI:
ice_parse_ieee_tlv(tlv, dcbcfg);
@@ -616,8 +601,8 @@ static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
tlv = (struct ice_lldp_org_tlv *)lldpmib;
while (1) {
typelen = ntohs(tlv->typelen);
- type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ type = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += sizeof(typelen) + len;
/* END TLV or beyond LLDPDU size */
@@ -806,11 +791,11 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*/
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
dcbcfg->etscfg.prio_table[i * 2] =
- ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >>
- ICE_CEE_PGID_PRIO_0_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_0_M,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prio_table[i * 2 + 1] =
- ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >>
- ICE_CEE_PGID_PRIO_1_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_1_M,
+ cee_cfg->oper_prio_tc[i]);
}
ice_for_each_traffic_class(i) {
@@ -982,7 +967,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
- change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
+ change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
@@ -1483,7 +1468,7 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
while (1) {
ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
typelen = ntohs(tlv->typelen);
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
if (len)
offset += len + 2;
/* END TLV or beyond LLDPDU size */
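
Most of the ice_dcb.c conversion above is a mechanical mask-and-shift to FIELD_GET() replacement over the LLDP TLV header. A self-contained sketch of that header split (7-bit type, 9-bit length per IEEE 802.1AB); the mask names below mirror, but are not, the driver's ICE_LLDP_TLV_*_M definitions.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_TLV_TYPE_M	GENMASK(15, 9)	/* TLV type: upper 7 bits */
#define EX_TLV_LEN_M	GENMASK(8, 0)	/* TLV length: lower 9 bits */

static void example_parse_tlv_header(u16 typelen, u8 *type, u16 *len)
{
	/* FIELD_GET() applies the mask and shifts the field down to bit 0 */
	*type = FIELD_GET(EX_TLV_TYPE_M, typelen);
	*len = FIELD_GET(EX_TLV_LEN_M, typelen);
}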
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 850db8e0e6b0..6e20ee610022 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -934,7 +934,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
skb->priority != TC_PRIO_CONTROL) {
first->vid &= ~VLAN_PRIO_MASK;
/* Mask the lower 3 bits to set the 802.1p priority */
- first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+ first->vid |= FIELD_PREP(VLAN_PRIO_MASK, skb->priority);
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index e1fbc6de452d..6d50b90a7359 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -227,7 +227,7 @@ static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, PRTDCB_GENC);
- *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
+ *delay = FIELD_GET(PRTDCB_GENC_PFCLDA_M, val);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
new file mode 100644
index 000000000000..c2bfba6b9ead
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+#include "ice.h"
+
+static struct dentry *ice_debugfs_root;
+
+/* create a define with one extra module that doesn't really exist in FW;
+ * this lets us add an 'all' module to easily enable/disable all the modules
+ */
+#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
+ */
+static const char * const ice_fwlog_module_string[] = {
+ "general",
+ "ctrl",
+ "link",
+ "link_topo",
+ "dnl",
+ "i2c",
+ "sdp",
+ "mdio",
+ "adminq",
+ "hdma",
+ "lldp",
+ "dcbx",
+ "dcb",
+ "xlr",
+ "nvm",
+ "auth",
+ "vpd",
+ "iosf",
+ "parser",
+ "sw",
+ "scheduler",
+ "txq",
+ "rsvd",
+ "post",
+ "watchdog",
+ "task_dispatch",
+ "mng",
+ "synce",
+ "health",
+ "tsdrv",
+ "pfreg",
+ "mdlver",
+ "all",
+};
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_fwlog_level
+ */
+static const char * const ice_fwlog_level_string[] = {
+ "none",
+ "error",
+ "warning",
+ "normal",
+ "verbose",
+};
+
+/* the order in this array is important. The index matches the log size
+ * selection stored in hw->fwlog_ring.index
+ */
+static const char * const ice_fwlog_log_size[] = {
+ "128K",
+ "256K",
+ "512K",
+ "1M",
+ "2M",
+};
+
+/**
+ * ice_fwlog_print_module_cfg - print current FW logging module configuration
+ * @hw: pointer to the HW structure
+ * @module: module to print
+ * @s: the seq file to put data into
+ */
+static void
+ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
+{
+ struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
+ struct ice_fwlog_module_entry *entry;
+
+ if (module != ICE_AQC_FW_LOG_ID_MAX) {
+ entry = &cfg->module_entries[module];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ ice_fwlog_module_string[entry->module_id],
+ ice_fwlog_level_string[entry->log_level]);
+ } else {
+ int i;
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ entry = &cfg->module_entries[i];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ ice_fwlog_module_string[entry->module_id],
+ ice_fwlog_level_string[entry->log_level]);
+ }
+ }
+}
+
+static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
+{
+ int i, module;
+
+ module = -1;
+ /* find the module based on the dentry */
+ for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
+ if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
+ module = i;
+ break;
+ }
+ }
+
+ return module;
+}
+
+/**
+ * ice_debugfs_module_show - read from 'module' file
+ * @s: the opened file
+ * @v: pointer to the offset
+ */
+static int ice_debugfs_module_show(struct seq_file *s, void *v)
+{
+ const struct file *filp = s->file;
+ struct dentry *dentry;
+ struct ice_pf *pf;
+ int module;
+
+ dentry = file_dentry(filp);
+ pf = s->private;
+
+ module = ice_find_module_by_dentry(pf, dentry);
+ if (module < 0) {
+ dev_info(ice_pf_to_dev(pf), "unknown module\n");
+ return -EINVAL;
+ }
+
+ ice_fwlog_print_module_cfg(&pf->hw, module, s);
+
+ return 0;
+}
+
+static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, ice_debugfs_module_show, inode->i_private);
+}
+
+/**
+ * ice_debugfs_module_write - write into 'module' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_module_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = file_inode(filp)->i_private;
+ struct dentry *dentry = file_dentry(filp);
+ struct device *dev = ice_pf_to_dev(pf);
+ char user_val[16], *cmd_buf;
+ int module, log_level, cnt;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 8)
+ return -EINVAL;
+
+ cmd_buf = memdup_user(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ module = ice_find_module_by_dentry(pf, dentry);
+ if (module < 0) {
+ dev_info(dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ cnt = sscanf(cmd_buf, "%s", user_val);
+ if (cnt != 1)
+ return -EINVAL;
+
+ log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
+ if (log_level < 0) {
+ dev_info(dev, "unknown log level '%s'\n", user_val);
+ return -EINVAL;
+ }
+
+ if (module != ICE_AQC_FW_LOG_ID_MAX) {
+ ice_pf_fwlog_update_module(pf, log_level, module);
+ } else {
+ /* the module 'all' is a shortcut so that we can set
+ * all of the modules to the same level quickly
+ */
+ int i;
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
+ ice_pf_fwlog_update_module(pf, log_level, i);
+ }
+
+ return count;
+}
+
+static const struct file_operations ice_debugfs_module_fops = {
+ .owner = THIS_MODULE,
+ .open = ice_debugfs_module_open,
+ .read = seq_read,
+ .release = single_release,
+ .write = ice_debugfs_module_write,
+};
+
+/**
+ * ice_debugfs_nr_messages_read - read from 'nr_messages' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%d\n",
+ hw->fwlog_cfg.log_resolution);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_nr_messages_write - write into 'nr_messages' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ s16 nr_messages;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 4)
+ return -EINVAL;
+
+ cmd_buf = memdup_user(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtos16(user_val, 0, &nr_messages);
+ if (ret)
+ return ret;
+
+ if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
+ nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
+ dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
+ nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
+ ICE_AQC_FW_LOG_MAX_RESOLUTION);
+ return -EINVAL;
+ }
+
+ hw->fwlog_cfg.log_resolution = nr_messages;
+
+ return count;
+}
+
+static const struct file_operations ice_debugfs_nr_messages_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_nr_messages_read,
+ .write = ice_debugfs_nr_messages_write,
+};
+
+/**
+ * ice_debugfs_enable_read - read from 'enable' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_enable_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%u\n",
+ (u16)(hw->fwlog_cfg.options &
+ ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_enable_write - write into 'enable' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_enable_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ bool enable;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 2)
+ return -EINVAL;
+
+ cmd_buf = memdup_user(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtobool(user_val, &enable);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ else
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
+
+ ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ ret = ice_fwlog_register(hw);
+ else
+ ret = ice_fwlog_unregister(hw);
+
+ if (ret)
+ goto enable_write_error;
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+enable_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_enable_read,
+ .write = ice_debugfs_enable_write,
+};
+
+/**
+ * ice_debugfs_log_size_read - read from 'log_size' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_log_size_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+ int index;
+
+ index = hw->fwlog_ring.index;
+ snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_log_size_write - write into 'log_size' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ ssize_t ret;
+ int index;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 5)
+ return -EINVAL;
+
+ cmd_buf = memdup_user(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = sysfs_match_string(ice_fwlog_log_size, user_val);
+ if (index < 0) {
+ dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
+ user_val);
+ ret = -EINVAL;
+ goto log_size_write_error;
+ } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
+ dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
+ ret = -EINVAL;
+ goto log_size_write_error;
+ }
+
+ /* free all the buffers and the tracking info and resize */
+ ice_fwlog_realloc_rings(hw, index);
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+log_size_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_log_size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_log_size_read,
+ .write = ice_debugfs_log_size_write,
+};
+
+/**
+ * ice_debugfs_data_read - read from 'data' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ int data_copied = 0;
+ bool done = false;
+
+ if (ice_fwlog_ring_empty(&hw->fwlog_ring))
+ return 0;
+
+ while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
+ struct ice_fwlog_data *log;
+ u16 cur_buf_len;
+
+ log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
+ cur_buf_len = log->data_size;
+ if (cur_buf_len >= count) {
+ done = true;
+ continue;
+ }
+
+ if (copy_to_user(buffer, log->data, cur_buf_len)) {
+ /* if there is an error then bail and return whatever
+ * the driver has copied so far
+ */
+ done = true;
+ continue;
+ }
+
+ data_copied += cur_buf_len;
+ buffer += cur_buf_len;
+ count -= cur_buf_len;
+ *ppos += cur_buf_len;
+ ice_fwlog_ring_increment(&hw->fwlog_ring.head,
+ hw->fwlog_ring.size);
+ }
+
+ return data_copied;
+}
+
+/**
+ * ice_debugfs_data_write - write into 'data' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ ssize_t ret;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ /* any value is allowed to clear the buffer so no need to even look at
+ * what the value is
+ */
+ if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
+ hw->fwlog_ring.head = 0;
+ hw->fwlog_ring.tail = 0;
+ } else {
+ dev_info(dev, "Can't clear FW log data while FW log running\n");
+ ret = -EINVAL;
+ goto nr_buffs_write_error;
+ }
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+nr_buffs_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_data_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_data_read,
+ .write = ice_debugfs_data_write,
+};
+
+/**
+ * ice_debugfs_fwlog_init - setup the debugfs directory
+ * @pf: the ice that is starting up
+ */
+void ice_debugfs_fwlog_init(struct ice_pf *pf)
+{
+ const char *name = pci_name(pf->pdev);
+ struct dentry *fw_modules_dir;
+ struct dentry **fw_modules;
+ int i;
+
+ /* only support fw log commands on PF 0 */
+ if (pf->hw.bus.func)
+ return;
+
+ /* allocate space for this first because if it fails then we don't
+ * need to unwind
+ */
+ fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
+ GFP_KERNEL);
+ if (!fw_modules)
+ return;
+
+ pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
+ if (IS_ERR(pf->ice_debugfs_pf))
+ goto err_create_module_files;
+
+ pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
+ pf->ice_debugfs_pf);
+ if (IS_ERR(pf->ice_debugfs_pf_fwlog))
+ goto err_create_module_files;
+
+ fw_modules_dir = debugfs_create_dir("modules",
+ pf->ice_debugfs_pf_fwlog);
+ if (IS_ERR(fw_modules_dir))
+ goto err_create_module_files;
+
+ for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
+ fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
+ 0600, fw_modules_dir, pf,
+ &ice_debugfs_module_fops);
+ if (IS_ERR(fw_modules[i]))
+ goto err_create_module_files;
+ }
+
+ debugfs_create_file("nr_messages", 0600,
+ pf->ice_debugfs_pf_fwlog, pf,
+ &ice_debugfs_nr_messages_fops);
+
+ pf->ice_debugfs_pf_fwlog_modules = fw_modules;
+
+ debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_enable_fops);
+
+ debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_log_size_fops);
+
+ debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_data_fops);
+
+ return;
+
+err_create_module_files:
+ debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
+ kfree(fw_modules);
+}
+
+/**
+ * ice_debugfs_init - create root directory for debugfs entries
+ */
+void ice_debugfs_init(void)
+{
+ ice_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR(ice_debugfs_root))
+ pr_info("init of debugfs failed\n");
+}
+
+/**
+ * ice_debugfs_exit - remove debugfs entries
+ */
+void ice_debugfs_exit(void)
+{
+ debugfs_remove_recursive(ice_debugfs_root);
+ ice_debugfs_root = NULL;
+}
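
The write handlers in the new ice_debugfs.c share a copy-parse-validate pattern around memdup_user(). A hedged, self-contained sketch of that pattern, written so the copied buffer is always NUL-terminated and freed on every exit path; the function name, size limits, and level table are illustrative, not the driver's.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_debugfs_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	static const char * const levels[] = { "none", "error", "warning" };
	char *cmd_buf;
	int level;

	/* reject partial writes and oversized input up front */
	if (*ppos != 0 || count > 8)
		return -EINVAL;

	cmd_buf = memdup_user_nul(buf, count);	/* copies and NUL-terminates */
	if (IS_ERR(cmd_buf))
		return PTR_ERR(cmd_buf);

	/* trim trailing newline/whitespace and match against the table */
	level = sysfs_match_string(levels, strim(cmd_buf));
	kfree(cmd_buf);				/* freed on every path */
	if (level < 0)
		return -EINVAL;

	/* apply 'level' here; the whole input was consumed */
	return count;
}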
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 80dc5445b50d..65be56f2af9e 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -193,6 +193,24 @@ ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}
+static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ u32 id, cfg_ver, fw_ver;
+
+ if (!ice_is_feature_supported(pf, ICE_F_CGU))
+ return;
+ if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver))
+ return;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver);
+}
+
+static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ if (!ice_is_feature_supported(pf, ICE_F_CGU))
+ return;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number);
+}
+
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }
@@ -235,6 +253,8 @@ static const struct ice_devlink_version {
running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
+ fixed("cgu.id", ice_info_cgu_id),
+ running("fw.cgu", ice_info_cgu_fw_build),
};
/**
@@ -810,6 +830,10 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
struct ice_vf *vf;
int i;
+ if (node->rate_node)
+ /* already added, skip to the next */
+ goto traverse_children;
+
if (node->parent == tc_node) {
/* create root node */
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
@@ -831,6 +855,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
if (rate_node && !IS_ERR(rate_node))
node->rate_node = rate_node;
+traverse_children:
for (i = 0; i < node->num_children; i++)
ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}
@@ -861,6 +886,30 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
return 0;
}
+static void ice_clear_rate_nodes(struct ice_sched_node *node)
+{
+ node->rate_node = NULL;
+
+ for (int i = 0; i < node->num_children; i++)
+ ice_clear_rate_nodes(node->children[i]);
+}
+
+/**
+ * ice_devlink_rate_clear_tx_topology - clear node->rate_node
+ * @vsi: main VSI struct
+ *
+ * Clear rate_node to clean up the previously created Tx topology.
+ *
+ */
+void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi)
+{
+ struct ice_port_info *pi = vsi->port_info;
+
+ mutex_lock(&pi->sched_lock);
+ ice_clear_rate_nodes(pi->root->children[0]);
+ mutex_unlock(&pi->sched_lock);
+}
+
/**
* ice_set_object_tx_share - sets node scheduling parameter
* @pi: devlink struct instance
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index 6ec96779f52e..d291c0e2e17b 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -20,5 +20,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf);
int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi);
void ice_tear_down_devlink_rate_tree(struct ice_pf *pf);
+void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi);
#endif /* _ICE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 86b180cb32a0..bd9b1fed74ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -31,6 +31,26 @@ static const char * const pin_type_name[] = {
};
/**
+ * ice_dpll_is_reset - check if reset is in progress
+ * @pf: private board structure
+ * @extack: error reporting
+ *
+ * If reset is in progress, fill extack with error.
+ *
+ * Return:
+ * * false - no reset in progress
+ * * true - reset in progress
+ */
+static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+ if (ice_is_reset_in_progress(pf->state)) {
+ NL_SET_ERR_MSG(extack, "PF reset in progress");
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_dpll_pin_freq_set - set pin's frequency
* @pf: private board structure
* @pin: pointer to a pin
@@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
mutex_unlock(&pf->dplls.lock);
@@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
+ * @dpll_idx: dpll index to connect to output pin
* @pin_type: type of pin being enabled
* @extack: error reporting
*
@@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
*/
static int
ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
- enum ice_dpll_pin_type pin_type,
+ u8 dpll_idx, enum ice_dpll_pin_type pin_type,
struct netlink_ext_ack *extack)
{
u8 flags = 0;
@@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL;
if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
- ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx,
+ 0, 0);
break;
default:
return -EINVAL;
@@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
case ICE_DPLL_PIN_TYPE_INPUT:
ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
NULL, &pin->flags[0],
- &pin->freq, NULL);
+ &pin->freq, &pin->phase_adjust);
if (ret)
goto err;
if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
@@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
- &pin->flags[0], NULL,
+ &pin->flags[0], &parent,
&pin->freq, NULL);
if (ret)
goto err;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
- pin->state[0] = DPLL_PIN_STATE_CONNECTED;
- else
- pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+
+ parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ parent == pf->dplls.eec.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ parent == pf->dplls.pps.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
break;
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
for (parent = 0; parent < pf->dplls.rclk.num_parents;
@@ -513,31 +552,6 @@ ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
- * ice_dpll_mode_supported - check if dpll's working mode is supported
- * @dpll: registered dpll pointer
- * @dpll_priv: private data pointer passed on dpll registration
- * @mode: mode to be checked for support
- * @extack: error reporting
- *
- * Dpll subsystem callback. Provides information if working mode is supported
- * by dpll.
- *
- * Return:
- * * true - mode is supported
- * * false - mode is not supported
- */
-static bool ice_dpll_mode_supported(const struct dpll_device *dpll,
- void *dpll_priv,
- enum dpll_mode mode,
- struct netlink_ext_ack *extack)
-{
- if (mode == DPLL_MODE_AUTOMATIC)
- return true;
-
- return false;
-}
-
-/**
* ice_dpll_mode_get - get dpll's working mode
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
@@ -593,9 +607,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
if (enable)
- ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type,
+ extack);
else
ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
if (!ret)
@@ -628,6 +646,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
struct netlink_ext_ack *extack)
{
bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+
+ if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
+ return 0;
return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
extack, ICE_DPLL_PIN_TYPE_OUTPUT);
@@ -690,14 +713,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
if (ret)
goto unlock;
- if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT ||
+ pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
*state = p->state[d->dpll_idx];
- else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
- *state = p->state[0];
ret = 0;
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -815,6 +840,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
mutex_unlock(&pf->dplls.lock);
@@ -935,6 +963,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
u8 flag, flags_en = 0;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
switch (type) {
case ICE_DPLL_PIN_TYPE_INPUT:
@@ -1094,6 +1125,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1148,6 +1182,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1197,7 +1234,6 @@ static const struct dpll_pin_ops ice_dpll_output_ops = {
static const struct dpll_device_ops ice_dpll_ops = {
.lock_status_get = ice_dpll_lock_status_get,
- .mode_supported = ice_dpll_mode_supported,
.mode_get = ice_dpll_mode_get,
};
@@ -1331,8 +1367,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
- int ret;
+ int ret = 0;
+ if (ice_is_reset_in_progress(pf->state))
+ goto resched;
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
@@ -1352,6 +1390,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+resched:
/* Run twice a second or reschedule if update failed */
kthread_queue_delayed_work(d->kworker, &d->work,
ret ? msecs_to_jiffies(10) :
@@ -1558,7 +1597,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
}
if (WARN_ON_ONCE(!vsi || !vsi->netdev))
return;
- netdev_dpll_pin_clear(vsi->netdev);
+ dpll_netdev_pin_clear(vsi->netdev);
dpll_pin_put(rclk->pin);
}
@@ -1602,7 +1641,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (WARN_ON((!vsi || !vsi->netdev)))
return -EINVAL;
- netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+ dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
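
The output-pin handling in ice_dpll_pin_state_update() above now keeps a CONNECTED/DISCONNECTED state per DPLL instead of a single slot. A stand-alone sketch of that bookkeeping; the enum and names are illustrative and only mirror the DPLL_PIN_STATE_* values.

enum example_pin_state { EX_PIN_DISCONNECTED, EX_PIN_CONNECTED };

/* Mark the output pin connected only to the DPLL that firmware reports as
 * its source ("parent"), and only while the output-enable flag is set.
 */
static void example_set_output_states(enum example_pin_state state[],
				      int num_dplls, bool out_en,
				      int parent_dpll)
{
	for (int i = 0; i < num_dplls; i++)
		state[i] = (out_en && i == parent_dpll) ?
			   EX_PIN_CONNECTED : EX_PIN_DISCONNECTED;
}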
@@ -2081,6 +2120,7 @@ void ice_dpll_init(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
int err = 0;
+ mutex_init(&d->lock);
err = ice_dpll_init_info(pf, cgu);
if (err)
goto err_exit;
@@ -2093,7 +2133,6 @@ void ice_dpll_init(struct ice_pf *pf)
err = ice_dpll_init_pins(pf, cgu);
if (err)
goto deinit_pps;
- mutex_init(&d->lock);
if (cgu) {
err = ice_dpll_init_worker(pf);
if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index a655d499abfa..9069725c71b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -11,17 +11,34 @@
#include "ice_tc_lib.h"
/**
- * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
+ * ice_eswitch_del_sp_rules - delete adv rules added on PRs
+ * @pf: pointer to the PF struct
+ *
+ * Delete all advanced rules that were used to forward packets with the
+ * device's VSI index to the corresponding eswitch ctrl VSI queue.
+ */
+static void ice_eswitch_del_sp_rules(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&pf->eswitch.reprs, id, repr) {
+ if (repr->sp_rule.rid)
+ ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule);
+ }
+}
+
+/**
+ * ice_eswitch_add_sp_rule - add adv rule with device's VSI index
* @pf: pointer to PF struct
- * @vf: pointer to VF struct
+ * @repr: pointer to the repr struct
*
* This function adds advanced rule that forwards packets with
- * VF's VSI index to the corresponding switchdev ctrl VSI queue.
+ * device's VSI index to the corresponding eswitch ctrl VSI queue.
*/
-static int
-ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
+static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct ice_adv_rule_info rule_info = { 0 };
struct ice_adv_lkup_elem *list;
struct ice_hw *hw = &pf->hw;
@@ -38,39 +55,42 @@ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
- ctrl_vsi->rxq_map[vf->vf_id];
+ ctrl_vsi->rxq_map[repr->q_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
- rule_info.src_vsi = vf->lan_vsi_idx;
+ rule_info.src_vsi = repr->src_vsi->idx;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
- &vf->repr->sp_rule);
+ &repr->sp_rule);
if (err)
- dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
- vf->vf_id);
+ dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d",
+ repr->id);
kfree(list);
return err;
}
-/**
- * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
- * @vf: pointer to the VF struct
- *
- * Delete the advanced rule that was used to forward packets with the VF's VSI
- * index to the corresponding switchdev ctrl VSI queue.
- */
-static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
+static int
+ice_eswitch_add_sp_rules(struct ice_pf *pf)
{
- if (!vf->repr)
- return;
+ struct ice_repr *repr;
+ unsigned long id;
+ int err;
+
+ xa_for_each(&pf->eswitch.reprs, id, repr) {
+ err = ice_eswitch_add_sp_rule(pf, repr);
+ if (err) {
+ ice_eswitch_del_sp_rules(pf);
+ return err;
+ }
+ }
- ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
+ return 0;
}
/**
- * ice_eswitch_setup_env - configure switchdev HW filters
+ * ice_eswitch_setup_env - configure eswitch HW filters
* @pf: pointer to PF struct
*
* This function adds HW filters configuration specific for switchdev
@@ -78,18 +98,18 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
*/
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
- struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
- struct net_device *uplink_netdev = uplink_vsi->netdev;
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct net_device *netdev = uplink_vsi->netdev;
struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
- netif_addr_lock_bh(uplink_netdev);
- __dev_uc_unsync(uplink_netdev, NULL);
- __dev_mc_unsync(uplink_netdev, NULL);
- netif_addr_unlock_bh(uplink_netdev);
+ netif_addr_lock_bh(netdev);
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+ netif_addr_unlock_bh(netdev);
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
@@ -132,19 +152,20 @@ err_def_rx:
}
/**
- * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
- * @pf: pointer to PF struct
+ * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
+ * @eswitch: pointer to eswitch struct
*
- * In switchdev number of allocated Tx/Rx rings is equal.
+ * In the eswitch, the numbers of allocated Tx and Rx rings are equal.
 *
 * This function fills the q_vector structures associated with each
 * representor and moves each ring pair to the port representor netdevs.
 * Each port representor gets one dedicated Tx/Rx ring pair, so the number
 * of ring pairs equals the number of VFs.
*/
-static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
+static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
{
- struct ice_vsi *vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *vsi = eswitch->control_vsi;
+ unsigned long repr_id = 0;
int q_id;
ice_for_each_txq(vsi, q_id) {
@@ -152,13 +173,14 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
struct ice_repr *repr;
- struct ice_vf *vf;
- vf = ice_get_vf_by_id(pf, q_id);
- if (WARN_ON(!vf))
- continue;
+ repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
+ XA_PRESENT);
+ if (!repr)
+ break;
- repr = vf->repr;
+ repr_id += 1;
+ repr->q_id = q_id;
q_vector = repr->q_vector;
tx_ring = vsi->tx_rings[q_id];
rx_ring = vsi->rx_rings[q_id];
@@ -181,136 +203,96 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
rx_ring->q_vector = q_vector;
rx_ring->next = NULL;
rx_ring->netdev = repr->netdev;
-
- ice_put_vf(vf);
}
}
/**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * ice_eswitch_release_repr - clear PR VSI configuration
 * @pf: pointer to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
+ * @repr: pointer to PR
*/
static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vf *vf;
- unsigned int bkt;
+ struct ice_vsi *vsi = repr->src_vsi;
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_vsi *vsi = vf->repr->src_vsi;
-
- /* Skip VFs that aren't configured */
- if (!vf->repr->dst)
- continue;
+ /* Skip representors that aren't configured */
+ if (!repr->dst)
+ return;
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- ice_eswitch_del_vf_sp_rule(vf);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(repr->dst);
+ repr->dst = NULL;
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
+ ICE_FWD_TO_VSI);
- netif_napi_del(&vf->repr->q_vector->napi);
- }
+ netif_napi_del(&repr->q_vector->napi);
}
/**
- * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
+ * ice_eswitch_setup_repr - configure PR to run in switchdev mode
* @pf: pointer to PF struct
+ * @repr: pointer to PR struct
*/
-static int ice_eswitch_setup_reprs(struct ice_pf *pf)
+static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- int max_vsi_num = 0;
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_vsi *vsi = vf->repr->src_vsi;
-
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
- vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
- GFP_KERNEL);
- if (!vf->repr->dst) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- goto err;
- }
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct ice_vsi *vsi = repr->src_vsi;
+ struct metadata_dst *dst;
- if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- goto err;
- }
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!repr->dst)
+ goto err_add_mac_fltr;
- if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- ice_eswitch_del_vf_sp_rule(vf);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- goto err;
- }
+ if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
+ goto err_dst_free;
- if (ice_vsi_add_vlan_zero(vsi)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- ice_eswitch_del_vf_sp_rule(vf);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- goto err;
- }
-
- if (max_vsi_num < vsi->vsi_num)
- max_vsi_num = vsi->vsi_num;
+ if (ice_vsi_add_vlan_zero(vsi))
+ goto err_update_security;
- netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
- ice_napi_poll);
+ netif_napi_add(repr->netdev, &repr->q_vector->napi,
+ ice_napi_poll);
- netif_keep_dst(vf->repr->netdev);
- }
+ netif_keep_dst(repr->netdev);
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_repr *repr = vf->repr;
- struct ice_vsi *vsi = repr->src_vsi;
- struct metadata_dst *dst;
-
- dst = repr->dst;
- dst->u.port_info.port_id = vsi->vsi_num;
- dst->u.port_info.lower_dev = repr->netdev;
- ice_repr_set_traffic_vsi(repr, ctrl_vsi);
- }
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = repr->netdev;
+ ice_repr_set_traffic_vsi(repr, ctrl_vsi);
return 0;
-err:
- ice_eswitch_release_reprs(pf, ctrl_vsi);
+err_update_security:
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+err_dst_free:
+ metadata_dst_free(repr->dst);
+ repr->dst = NULL;
+err_add_mac_fltr:
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
return -ENODEV;
}
/**
- * ice_eswitch_update_repr - reconfigure VF port representor
- * @vsi: VF VSI for which port representor is configured
+ * ice_eswitch_update_repr - reconfigure port representor
+ * @repr_id: representor ID
+ * @vsi: VSI for which port representor is configured
*/
-void ice_eswitch_update_repr(struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_repr *repr;
- struct ice_vf *vf;
int ret;
if (!ice_is_switchdev_running(pf))
return;
- vf = vsi->vf;
- repr = vf->repr;
+ repr = xa_load(&pf->eswitch.reprs, repr_id);
+ if (!repr)
+ return;
+
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
@@ -319,9 +301,10 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
- dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
- vsi->vf->vf_id);
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
+ ICE_FWD_TO_VSI);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
+ repr->id);
}
}
@@ -353,13 +336,13 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_dst_drop(skb);
dst_hold((struct dst_entry *)repr->dst);
skb_dst_set(skb, (struct dst_entry *)repr->dst);
- skb->queue_mapping = repr->vf->vf_id;
+ skb->queue_mapping = repr->q_id;
return ice_start_xmit(skb, netdev);
}
/**
- * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
+ * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
* @skb: pointer to send buffer
* @off: pointer to offload struct
*/
@@ -375,14 +358,14 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
- dst_vsi = ((u64)dst->u.port_info.port_id <<
- ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
+ dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M,
+ dst->u.port_info.port_id);
off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
}
}
/**
- * ice_eswitch_release_env - clear switchdev HW filters
+ * ice_eswitch_release_env - clear eswitch HW filters
* @pf: pointer to PF struct
*
* This function removes HW filters configuration specific for switchdev
@@ -390,8 +373,8 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
*/
static void ice_eswitch_release_env(struct ice_pf *pf)
{
- struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct ice_vsi_vlan_ops *vlan_ops;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
@@ -407,7 +390,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
}
/**
- * ice_eswitch_vsi_setup - configure switchdev control VSI
+ * ice_eswitch_vsi_setup - configure eswitch control VSI
* @pf: pointer to PF structure
* @pi: pointer to port_info structure
*/
@@ -424,48 +407,29 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
- * ice_eswitch_napi_del - remove NAPI handle for all port representors
- * @pf: pointer to PF structure
- */
-static void ice_eswitch_napi_del(struct ice_pf *pf)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf)
- netif_napi_del(&vf->repr->q_vector->napi);
-}
-
-/**
* ice_eswitch_napi_enable - enable NAPI for all port representors
- * @pf: pointer to PF structure
+ * @reprs: xarray of reprs
*/
-static void ice_eswitch_napi_enable(struct ice_pf *pf)
+static void ice_eswitch_napi_enable(struct xarray *reprs)
{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
+ struct ice_repr *repr;
+ unsigned long id;
- ice_for_each_vf(pf, bkt, vf)
- napi_enable(&vf->repr->q_vector->napi);
+ xa_for_each(reprs, id, repr)
+ napi_enable(&repr->q_vector->napi);
}
/**
* ice_eswitch_napi_disable - disable NAPI for all port representors
- * @pf: pointer to PF structure
+ * @reprs: xarray of reprs
*/
-static void ice_eswitch_napi_disable(struct ice_pf *pf)
+static void ice_eswitch_napi_disable(struct xarray *reprs)
{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
+ struct ice_repr *repr;
+ unsigned long id;
- ice_for_each_vf(pf, bkt, vf)
- napi_disable(&vf->repr->q_vector->napi);
+ xa_for_each(reprs, id, repr)
+ napi_disable(&repr->q_vector->napi);
}
/**
@@ -486,39 +450,26 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
return -EINVAL;
}
- pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
- if (!pf->switchdev.control_vsi)
+ pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
+ if (!pf->eswitch.control_vsi)
return -ENODEV;
- ctrl_vsi = pf->switchdev.control_vsi;
- pf->switchdev.uplink_vsi = uplink_vsi;
+ ctrl_vsi = pf->eswitch.control_vsi;
+ /* cp VSI is created with 1 queue as default */
+ pf->eswitch.qs.value = 1;
+ pf->eswitch.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
goto err_vsi;
- if (ice_repr_add_for_all_vfs(pf))
- goto err_repr_add;
-
- if (ice_eswitch_setup_reprs(pf))
- goto err_setup_reprs;
-
- ice_eswitch_remap_rings_to_vectors(pf);
-
- if (ice_vsi_open(ctrl_vsi))
- goto err_setup_reprs;
-
if (ice_eswitch_br_offloads_init(pf))
goto err_br_offloads;
- ice_eswitch_napi_enable(pf);
+ pf->eswitch.is_running = true;
return 0;
err_br_offloads:
- ice_vsi_close(ctrl_vsi);
-err_setup_reprs:
- ice_repr_rem_from_all_vfs(pf);
-err_repr_add:
ice_eswitch_release_env(pf);
err_vsi:
ice_vsi_release(ctrl_vsi);
@@ -526,19 +477,19 @@ err_vsi:
}
/**
- * ice_eswitch_disable_switchdev - disable switchdev resources
+ * ice_eswitch_disable_switchdev - disable eswitch resources
* @pf: pointer to PF structure
*/
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
- ice_eswitch_napi_disable(pf);
ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
- ice_repr_rem_from_all_vfs(pf);
+
+ pf->eswitch.is_running = false;
+ pf->eswitch.qs.is_reaching = false;
}
/**
@@ -566,6 +517,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
case DEVLINK_ESWITCH_MODE_LEGACY:
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
pf->hw.pf_id);
+ xa_destroy(&pf->eswitch.reprs);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
@@ -578,6 +530,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
pf->hw.pf_id);
+ xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
break;
}
@@ -616,74 +569,168 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
}
/**
- * ice_eswitch_release - cleanup eswitch
+ * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
* @pf: pointer to PF structure
*/
-void ice_eswitch_release(struct ice_pf *pf)
+static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
- if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ struct ice_repr *repr;
+ unsigned long id;
+
+ if (test_bit(ICE_DOWN, pf->state))
return;
- ice_eswitch_disable_switchdev(pf);
- pf->switchdev.is_running = false;
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_repr_start_tx_queues(repr);
}
/**
- * ice_eswitch_configure - configure eswitch
+ * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
* @pf: pointer to PF structure
*/
-int ice_eswitch_configure(struct ice_pf *pf)
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
- int status;
-
- if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
- return 0;
+ struct ice_repr *repr;
+ unsigned long id;
- status = ice_eswitch_enable_switchdev(pf);
- if (status)
- return status;
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
- pf->switchdev.is_running = true;
- return 0;
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_repr_stop_tx_queues(repr);
}
-/**
- * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
- * @pf: pointer to PF structure
- */
-static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
+static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
- struct ice_vf *vf;
- unsigned int bkt;
+ ice_eswitch_del_sp_rules(pf);
+ ice_eswitch_stop_all_tx_queues(pf);
+ ice_eswitch_napi_disable(&pf->eswitch.reprs);
+}
- lockdep_assert_held(&pf->vfs.table_lock);
+static void ice_eswitch_start_reprs(struct ice_pf *pf)
+{
+ ice_eswitch_napi_enable(&pf->eswitch.reprs);
+ ice_eswitch_start_all_tx_queues(pf);
+ ice_eswitch_add_sp_rules(pf);
+}
- if (test_bit(ICE_DOWN, pf->state))
- return;
+static void
+ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
+{
+ struct ice_vsi *cp = eswitch->control_vsi;
+ int queues = 0;
+
+ if (eswitch->qs.is_reaching) {
+ if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
+ queues = eswitch->qs.to_reach;
+ eswitch->qs.is_reaching = false;
+ } else {
+ queues = 0;
+ }
+ } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
+ change < 0) {
+ queues = cp->alloc_txq + change;
+ }
- ice_for_each_vf(pf, bkt, vf) {
- if (vf->repr)
- ice_repr_start_tx_queues(vf->repr);
+ if (queues) {
+ cp->req_txq = queues;
+ cp->req_rxq = queues;
+ ice_vsi_close(cp);
+ ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
+ ice_vsi_open(cp);
+ } else if (!change) {
+ /* change == 0 means the VSI wasn't open yet; open it here */
+ ice_vsi_open(cp);
}
+
+ eswitch->qs.value += change;
+ ice_eswitch_remap_rings_to_vectors(eswitch);
}
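A plain-C sketch of the to_reach bookkeeping implemented above (names and numbers are illustrative, not driver code): once a batch of queues has been reserved, only the first attach triggers a control VSI rebuild, straight to the target count.

#include <stdbool.h>
#include <stdio.h>

struct qs { int value; int to_reach; bool is_reaching; };

/* Mirror of the decision in ice_eswitch_cp_change_queues(): return the
 * queue count to rebuild to, or 0 when no rebuild is needed.
 */
static int queues_for_change(struct qs *qs, int alloc_txq, int change)
{
	int queues = 0;

	if (qs->is_reaching) {
		if (qs->to_reach >= qs->value + change) {
			queues = qs->to_reach;
			qs->is_reaching = false;
		}
	} else if ((change > 0 && alloc_txq <= qs->value) || change < 0) {
		queues = alloc_txq + change;
	}

	qs->value += change;
	return queues;
}

int main(void)
{
	struct qs qs = { .value = 1, .to_reach = 4, .is_reaching = true };
	int alloc_txq = 1;	/* control VSI starts with one queue */

	/* three attaches after reserving three extra queues: only the
	 * first one rebuilds, directly to four queues
	 */
	for (int i = 0; i < 3; i++) {
		int q = queues_for_change(&qs, alloc_txq, 1);

		if (q)
			alloc_txq = q;	/* VSI rebuilt to the target size */
		printf("attach %d: rebuild to %d queues\n", i + 1, q);
	}
	return 0;
}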
-/**
- * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
- * @pf: pointer to PF structure
- */
-void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
+int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_vf *vf;
- unsigned int bkt;
+ struct ice_repr *repr;
+ int change = 1;
+ int err;
- lockdep_assert_held(&pf->vfs.table_lock);
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return 0;
- if (test_bit(ICE_DOWN, pf->state))
+ if (xa_empty(&pf->eswitch.reprs)) {
+ err = ice_eswitch_enable_switchdev(pf);
+ if (err)
+ return err;
+ /* Control plane VSI is created with 1 queue by default */
+ pf->eswitch.qs.to_reach -= 1;
+ change = 0;
+ }
+
+ ice_eswitch_stop_reprs(pf);
+
+ repr = ice_repr_add_vf(vf);
+ if (IS_ERR(repr)) {
+ err = PTR_ERR(repr);
+ goto err_create_repr;
+ }
+
+ err = ice_eswitch_setup_repr(pf, repr);
+ if (err)
+ goto err_setup_repr;
+
+ err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
+ XA_LIMIT(1, INT_MAX), GFP_KERNEL);
+ if (err)
+ goto err_xa_alloc;
+
+ vf->repr_id = repr->id;
+
+ ice_eswitch_cp_change_queues(&pf->eswitch, change);
+ ice_eswitch_start_reprs(pf);
+
+ return 0;
+
+err_xa_alloc:
+ ice_eswitch_release_repr(pf, repr);
+err_setup_repr:
+ ice_repr_rem_vf(repr);
+err_create_repr:
+ if (xa_empty(&pf->eswitch.reprs))
+ ice_eswitch_disable_switchdev(pf);
+ ice_eswitch_start_reprs(pf);
+
+ return err;
+}
+
+void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ if (!repr)
return;
- ice_for_each_vf(pf, bkt, vf) {
- if (vf->repr)
- ice_repr_stop_tx_queues(vf->repr);
+ ice_eswitch_stop_reprs(pf);
+ xa_erase(&pf->eswitch.reprs, repr->id);
+
+ if (xa_empty(&pf->eswitch.reprs))
+ ice_eswitch_disable_switchdev(pf);
+ else
+ ice_eswitch_cp_change_queues(&pf->eswitch, -1);
+
+ ice_eswitch_release_repr(pf, repr);
+ ice_repr_rem_vf(repr);
+
+ if (xa_empty(&pf->eswitch.reprs)) {
+ /* since all port representors are destroyed, there is
+ * no point in keeping the devlink rate nodes
+ */
+ ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
+ devl_lock(devlink);
+ devl_rate_nodes_destroy(devlink);
+ devl_unlock(devlink);
+ } else {
+ ice_eswitch_start_reprs(pf);
}
}
@@ -693,30 +740,35 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
*/
int ice_eswitch_rebuild(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- int status;
-
- ice_eswitch_napi_disable(pf);
- ice_eswitch_napi_del(pf);
-
- status = ice_eswitch_setup_env(pf);
- if (status)
- return status;
+ struct ice_repr *repr;
+ unsigned long id;
+ int err;
- status = ice_eswitch_setup_reprs(pf);
- if (status)
- return status;
+ if (!ice_is_switchdev_running(pf))
+ return 0;
- ice_eswitch_remap_rings_to_vectors(pf);
+ err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT);
+ if (err)
+ return err;
- ice_replay_tc_fltrs(pf);
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_eswitch_detach(pf, repr->vf);
- status = ice_vsi_open(ctrl_vsi);
- if (status)
- return status;
+ return 0;
+}
- ice_eswitch_napi_enable(pf);
- ice_eswitch_start_all_tx_queues(pf);
+/**
+ * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
+ * @pf: pointer to PF structure
+ * @change: how many more (or fewer) queues are needed
+ *
+ * Remember to call ice_eswitch_attach/detach() "change" number of times.
+ */
+void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
+{
+ if (pf->eswitch.qs.value + change < 0)
+ return;
- return 0;
+ pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
+ pf->eswitch.qs.is_reaching = true;
}
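A hedged usage sketch of the reserve-then-attach contract documented above (example_create_vf_reprs and the vfs array are hypothetical; ice_eswitch_reserve_cp_queues() and ice_eswitch_attach() are the functions added in this patch, driver-internal context assumed):

#include "ice.h"
#include "ice_eswitch.h"

/* Hypothetical SR-IOV enable path: announce the total queue demand up
 * front so the control plane VSI is rebuilt once, then attach each VF.
 */
static int example_create_vf_reprs(struct ice_pf *pf, struct ice_vf **vfs,
				   int num_vfs)
{
	int i, err;

	ice_eswitch_reserve_cp_queues(pf, num_vfs);

	for (i = 0; i < num_vfs; i++) {
		err = ice_eswitch_attach(pf, vfs[i]);
		if (err)
			return err;
	}

	return 0;
}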
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index b18bf83a2f5b..1a288a03a79a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -7,8 +7,9 @@
#include <net/devlink.h>
#ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_release(struct ice_pf *pf);
-int ice_eswitch_configure(struct ice_pf *pf);
+void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
+int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
int ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
@@ -17,7 +18,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -25,8 +26,15 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
#else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_release(struct ice_pf *pf) { }
+static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
@@ -34,7 +42,8 @@ static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off) { }
-static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+static inline void
+ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
@@ -68,5 +77,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
return NETDEV_TX_BUSY;
}
+
+static inline void
+ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index 6ae0269bdf73..ac5beecd028b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -893,10 +893,14 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
}
- if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
+ if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
vsi->back->br_port = NULL;
- else if (vsi->vf && vsi->vf->repr)
- vsi->vf->repr->br_port = NULL;
+ } else {
+ struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+
+ if (repr)
+ repr->br_port = NULL;
+ }
xa_erase(&bridge->ports, br_port->vsi_idx);
ice_eswitch_br_port_vlans_flush(br_port);
@@ -947,7 +951,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
- struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
struct ice_esw_br_port *br_port;
int err;
@@ -1185,7 +1189,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb,
static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
- struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
+ struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;
ASSERT_RTNL();
@@ -1194,7 +1198,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
- pf->switchdev.br_offloads = NULL;
+ pf->eswitch.br_offloads = NULL;
kfree(br_offloads);
}
@@ -1205,14 +1209,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
ASSERT_RTNL();
- if (pf->switchdev.br_offloads)
+ if (pf->eswitch.br_offloads)
return ERR_PTR(-EEXIST);
br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
- pf->switchdev.br_offloads = br_offloads;
+ pf->eswitch.br_offloads = br_offloads;
br_offloads->pf = pf;
return br_offloads;
@@ -1223,7 +1227,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
struct ice_esw_br_offloads *br_offloads;
- br_offloads = pf->switchdev.br_offloads;
+ br_offloads = pf->eswitch.br_offloads;
if (!br_offloads)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index bde9bc74f928..a19b06f18e40 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -129,7 +129,6 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
- ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors),
ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
@@ -1142,8 +1141,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ICE_VSI_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_vsi_stats[i].stat_string);
+ ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);
if (ice_is_port_repr_netdev(netdev))
return;
@@ -1162,8 +1160,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
return;
for (i = 0; i < ICE_PF_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_pf_stats[i].stat_string);
+ ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
@@ -1179,8 +1176,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_priv_flags[i].name);
+ ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
break;
default:
break;
@@ -2505,27 +2501,15 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
-#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
-#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
-#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
-#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
-#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
-#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
-#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
-
/**
* ice_parse_hash_flds - parses hash fields from RSS hash input
* @nfc: ethtool rxnfc command
+ * @symm: true if Symmetric Toeplitz is set
*
* This function parses the rxnfc command and returns intended
* hash fields for RSS configuration
*/
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
+static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2594,9 +2578,11 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
+ struct ice_rss_hash_cfg cfg;
struct device *dev;
u64 hashed_flds;
int status;
+ bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2606,7 +2592,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- hashed_flds = ice_parse_hash_flds(nfc);
+ symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
+ hashed_flds = ice_parse_hash_flds(nfc, symm);
if (hashed_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
vsi->vsi_num);
@@ -2620,7 +2607,12 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
+ cfg.hash_flds = hashed_flds;
+ cfg.addl_hdrs = hdrs;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+ cfg.symm = symm;
+
+ status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
if (status) {
dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
vsi->vsi_num, status);
@@ -2641,6 +2633,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
+ bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2659,7 +2652,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return;
}
- hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
+ hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
@@ -3198,11 +3191,18 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
return np->vsi->rss_table_size;
}
+/**
+ * ice_get_rxfh - get the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ *
+ * Reads the indirection table directly from the hardware.
+ */
static int
-ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
@@ -3233,17 +3233,18 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
vsi = vsi->tc_map_vsi[rss_context];
}
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
+ rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
- if (!indir)
+ if (!rxfh->indir)
return 0;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
- err = ice_get_rss_key(vsi, key);
+ err = ice_get_rss_key(vsi, rxfh->key);
if (err)
goto out;
@@ -3253,12 +3254,12 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
if (ice_is_adq_active(pf)) {
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = offset + lut[i] % qcount;
+ rxfh->indir[i] = offset + lut[i] % qcount;
goto out;
}
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = lut[i];
+ rxfh->indir[i] = lut[i];
out:
kfree(lut);
@@ -3266,42 +3267,31 @@ out:
}
/**
- * ice_get_rxfh - get the Rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
- *
- * Reads the indirection table directly from the hardware.
- */
-static int
-ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
-{
- return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
-}
-
-/**
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
static int
-ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
int err;
dev = ice_pf_to_dev(pf);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (rxfh->rss_context)
return -EOPNOTSUPP;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3315,7 +3305,15 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EOPNOTSUPP;
}
- if (key) {
+ /* Update the VSI's hash function */
+ if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ err = ice_set_rss_hfunc(vsi, hfunc);
+ if (err)
+ return err;
+
+ if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
@@ -3323,7 +3321,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
- memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
+ memcpy(vsi->rss_hkey_user, rxfh->key,
+ ICE_VSIQF_HKEY_ARRAY_SIZE);
err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
if (err)
@@ -3338,11 +3337,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- if (indir) {
+ if (rxfh->indir) {
int i;
for (i = 0; i < vsi->rss_table_size; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
} else {
ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
vsi->rss_size);
@@ -4220,9 +4219,11 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
+ .cap_rss_sym_xor_supported = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
@@ -4253,7 +4254,6 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
- .get_rxfh_context = ice_get_rxfh_context,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index d151e5bacfec..9a1a04f5f146 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -302,9 +302,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
continue;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
- u64 prof_id;
-
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ u64 prof_id = prof->prof_id[tun];
for (i = 0; i < prof->cnt; i++) {
if (prof->vsi_h[i] != vsi_idx)
@@ -362,10 +360,9 @@ ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
return;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
- u64 prof_id;
+ u64 prof_id = prof->prof_id[tun];
int j;
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
for (j = 0; j < prof->cnt; j++) {
u16 vsi_num;
@@ -439,14 +436,12 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
struct ice_flow_prof *hw_prof;
struct ice_fd_hw_prof *prof;
- u64 prof_id;
int j;
prof = hw->fdir_prof[flow];
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+ ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
prof->fdir_seg[tun], TNL_SEG_CNT(tun),
- &hw_prof);
+ false, &hw_prof);
for (j = 0; j < prof->cnt; j++) {
enum ice_flow_priority prio;
u64 entry_h = 0;
@@ -454,7 +449,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
prio = ICE_FLOW_PRIO_NORMAL;
err = ice_flow_add_entry(hw, ICE_BLK_FD,
- prof_id,
+ hw_prof->id,
prof->vsi_h[0],
prof->vsi_h[j],
prio, prof->fdir_seg,
@@ -464,6 +459,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
flow);
continue;
}
+ prof->prof_id[tun] = hw_prof->id;
prof->entry_h[j][tun] = entry_h;
}
}
@@ -507,8 +503,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
return -EINVAL;
data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
- data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
- ICE_USERDEF_FLEX_OFFS_S;
+ data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value);
if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
return -EINVAL;
@@ -638,7 +633,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
u64 entry1_h = 0;
u64 entry2_h = 0;
bool del_last;
- u64 prof_id;
int err;
int idx;
@@ -668,7 +662,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* then return error.
*/
if (hw->fdir_fltr_cnt[flow]) {
- dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return -EINVAL;
}
@@ -686,23 +680,23 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* That is the final parameters are 1 header (segment), no
* actions (NULL) and zero actions 0.
*/
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- TNL_SEG_CNT(tun), &prof);
+ err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
+ TNL_SEG_CNT(tun), false, &prof);
if (err)
return err;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (err)
goto err_prof;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (err)
goto err_entry;
hw_prof->fdir_seg[tun] = seg;
+ hw_prof->prof_id[tun] = prof->id;
hw_prof->entry_h[0][tun] = entry1_h;
hw_prof->entry_h[1][tun] = entry2_h;
hw_prof->vsi_h[0] = main_vsi->idx;
@@ -719,7 +713,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
entry1_h = 0;
vsi_h = main_vsi->tc_map_vsi[idx]->idx;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
main_vsi->idx, vsi_h,
ICE_FLOW_PRIO_NORMAL, seg,
&entry1_h);
@@ -756,7 +750,7 @@ err_unroll:
if (!hw_prof->entry_h[idx][tun])
continue;
- ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
hw_prof->entry_h[idx][tun] = 0;
if (del_last)
@@ -766,11 +760,11 @@ err_unroll:
hw_prof->cnt = 0;
err_entry:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
- ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+ ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
- ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
- dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return err;
}
@@ -1853,6 +1847,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
struct ice_pf *pf;
struct ice_hw *hw;
int fltrs_needed;
+ u32 max_location;
u16 tunnel_port;
int ret;
@@ -1884,8 +1879,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
if (ret)
return ret;
- if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
- dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ max_location = ice_get_fdir_cnt_all(hw);
+ if (fsp->location >= max_location) {
+ dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceed max %d.\n",
+ max_location);
return -ENOSPC;
}
@@ -1893,7 +1890,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
- dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
return -ENOSPC;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index ae089d32ee9d..5840c3e04a5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -604,55 +604,32 @@ ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx,
u64 qword;
/* prep QW0 of FD filter programming desc */
- qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &
- ICE_FXD_FLTR_QW0_QINDEX_M;
- qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) &
- ICE_FXD_FLTR_QW0_COMP_Q_M;
- qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) &
- ICE_FXD_FLTR_QW0_COMP_REPORT_M;
- qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) &
- ICE_FXD_FLTR_QW0_FD_SPACE_M;
- qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) &
- ICE_FXD_FLTR_QW0_STAT_CNT_M;
- qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) &
- ICE_FXD_FLTR_QW0_STAT_ENA_M;
- qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) &
- ICE_FXD_FLTR_QW0_EVICT_ENA_M;
- qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) &
- ICE_FXD_FLTR_QW0_TO_Q_M;
- qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) &
- ICE_FXD_FLTR_QW0_TO_Q_PRI_M;
- qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) &
- ICE_FXD_FLTR_QW0_DPU_RECIPE_M;
- qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) &
- ICE_FXD_FLTR_QW0_DROP_M;
- qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) &
- ICE_FXD_FLTR_QW0_FLEX_PRI_M;
- qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) &
- ICE_FXD_FLTR_QW0_FLEX_MDID_M;
- qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) &
- ICE_FXD_FLTR_QW0_FLEX_VAL_M;
+ qword = FIELD_PREP(ICE_FXD_FLTR_QW0_QINDEX_M, ctx->qindex);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_Q_M, ctx->comp_q);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_REPORT_M, ctx->comp_report);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FD_SPACE_M, ctx->fd_space);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_CNT_M, ctx->cnt_index);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_ENA_M, ctx->cnt_ena);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_EVICT_ENA_M, ctx->evict_ena);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_M, ctx->toq);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_PRI_M, ctx->toq_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DPU_RECIPE_M, ctx->dpu_recipe);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DROP_M, ctx->drop);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_PRI_M, ctx->flex_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_MDID_M, ctx->flex_mdid);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_VAL_M, ctx->flex_val);
fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword);
/* prep QW1 of FD filter programming desc */
- qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) &
- ICE_FXD_FLTR_QW1_DTYPE_M;
- qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) &
- ICE_FXD_FLTR_QW1_PCMD_M;
- qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) &
- ICE_FXD_FLTR_QW1_PROF_PRI_M;
- qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) &
- ICE_FXD_FLTR_QW1_PROF_M;
- qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) &
- ICE_FXD_FLTR_QW1_FD_VSI_M;
- qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) &
- ICE_FXD_FLTR_QW1_SWAP_M;
- qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) &
- ICE_FXD_FLTR_QW1_FDID_PRI_M;
- qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) &
- ICE_FXD_FLTR_QW1_FDID_MDID_M;
- qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) &
- ICE_FXD_FLTR_QW1_FDID_M;
+ qword = FIELD_PREP(ICE_FXD_FLTR_QW1_DTYPE_M, ctx->dtype);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PCMD_M, ctx->pcmd);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_PRI_M, ctx->desc_prof_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_M, ctx->desc_prof);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FD_VSI_M, ctx->fd_vsi);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_SWAP_M, ctx->swap);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_PRI_M, ctx->fdid_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_MDID_M, ctx->fdid_mdid);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_M, ctx->fdid);
fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword);
}
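For readers unfamiliar with FIELD_PREP(), a small userspace illustration of the equivalence between the removed shift-and-mask lines and the macro (the shift and mask values below are illustrative, not the real ICE_FXD_* definitions):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FIELD_S	11				/* illustrative shift */
#define EXAMPLE_FIELD_M	(0x7ULL << EXAMPLE_FIELD_S)	/* illustrative 3-bit mask */

/* the open-coded form this hunk removes */
static uint64_t prep_open_coded(uint64_t val)
{
	return (val << EXAMPLE_FIELD_S) & EXAMPLE_FIELD_M;
}

/* what FIELD_PREP(mask, val) does conceptually: shift the value to the
 * mask's lowest set bit, then mask off anything that does not fit
 */
static uint64_t prep_field(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

int main(void)
{
	printf("equal: %d\n", prep_open_coded(5) == prep_field(EXAMPLE_FIELD_M, 5));
	return 0;
}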
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 5ce413965930..20d5db88c99f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1218,11 +1218,13 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @blk: HW block
* @fv: field vector to search for
* @masks: masks for FV
+ * @symm: symmetric setting for RSS flows
* @prof_id: receives the profile ID
*/
static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
- struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
+ struct ice_fv_word *fv, u16 *masks, bool symm,
+ u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
u8 i;
@@ -1236,6 +1238,9 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
+ if (blk == ICE_BLK_RSS && es->symm[i] != symm)
+ continue;
+
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
@@ -1409,13 +1414,13 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
switch (blk) {
case ICE_BLK_RSS:
offset = GLQF_HMASK(mask_idx);
- val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
- val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
+ val = FIELD_PREP(GLQF_HMASK_MSK_INDEX_M, idx);
+ val |= FIELD_PREP(GLQF_HMASK_MASK_M, mask);
break;
case ICE_BLK_FD:
offset = GLQF_FDMASK(mask_idx);
- val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
- val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
+ val = FIELD_PREP(GLQF_FDMASK_MSK_INDEX_M, idx);
+ val |= FIELD_PREP(GLQF_FDMASK_MASK_M, mask);
break;
default:
ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
@@ -1716,15 +1721,16 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
}
/**
- * ice_write_es - write an extraction sequence to hardware
+ * ice_write_es - write an extraction sequence and symmetric setting to hardware
* @hw: pointer to the HW struct
* @blk: the block in which to write the extraction sequence
* @prof_id: the profile ID to write
* @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ * @symm: symmetric setting for RSS profiles
*/
static void
ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
- struct ice_fv_word *fv)
+ struct ice_fv_word *fv, bool symm)
{
u16 off;
@@ -1737,6 +1743,9 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
memcpy(&hw->blk[blk].es.t[off], fv,
hw->blk[blk].es.fvw * sizeof(*fv));
}
+
+ if (blk == ICE_BLK_RSS)
+ hw->blk[blk].es.symm[prof_id] = symm;
}
/**
@@ -1753,7 +1762,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
- ice_write_es(hw, blk, prof_id, NULL);
+ ice_write_es(hw, blk, prof_id, NULL, false);
ice_free_prof_masks(hw, blk, prof_id);
return ice_free_prof_id(hw, blk, prof_id);
}
@@ -2116,8 +2125,10 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
}
list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
@@ -2150,6 +2161,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@@ -2178,8 +2190,11 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
+ memset(es->symm, 0, es->count * sizeof(*es->symm));
memset(es->written, 0, es->count * sizeof(*es->written));
memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
+
+ memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id));
}
}
@@ -2196,6 +2211,7 @@ int ice_init_hw_tbls(struct ice_hw *hw)
ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@@ -2292,6 +2308,11 @@ int ice_init_hw_tbls(struct ice_hw *hw)
if (!es->ref_count)
goto err;
+ es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->symm), GFP_KERNEL);
+ if (!es->symm)
+ goto err;
+
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
if (!es->written)
@@ -2301,6 +2322,12 @@ int ice_init_hw_tbls(struct ice_hw *hw)
sizeof(*es->mask_ena), GFP_KERNEL);
if (!es->mask_ena)
goto err;
+
+ prof_id->count = blk_sizes[i].prof_id;
+ prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count,
+ sizeof(*prof_id->id), GFP_KERNEL);
+ if (!prof_id->id)
+ goto err;
}
return 0;
@@ -2963,6 +2990,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @attr_cnt: number of elements in attr array
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
+ * @symm: symmetric setting for RSS profiles
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -2972,7 +3000,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks)
+ struct ice_fv_word *es, u16 *masks, bool symm)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -2986,7 +3014,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
- status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
+ status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
@@ -3009,7 +3037,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
goto err_ice_add_prof;
/* and write new es */
- ice_write_es(hw, blk, prof_id, es);
+ ice_write_es(hw, blk, prof_id, es, symm);
}
ice_prof_inc_ref(hw, blk, prof_id);
@@ -3097,7 +3125,7 @@ err_ice_add_prof:
* This will search for a profile tracking ID which was previously added.
* The profile map lock should be held before calling this function.
*/
-static struct ice_prof_map *
+struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 7af7c8e9aa4e..b39d7cdc381f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -42,7 +42,9 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks);
+ struct ice_fv_word *es, u16 *masks, bool symm);
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 4f42e14ed3ae..d427a79d001a 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -146,6 +146,7 @@ struct ice_es {
u32 *mask_ena;
struct list_head prof_map;
struct ice_fv_word *t;
+ u8 *symm; /* symmetric setting per profile (RSS blk) */
struct mutex prof_map_lock; /* protect access to profiles list */
u8 *written;
u8 reverse; /* set to true to reverse FV order */
@@ -304,10 +305,16 @@ struct ice_masks {
struct ice_mask masks[ICE_PROF_MASK_COUNT];
};
+struct ice_prof_id {
+ unsigned long *id;
+ int count;
+};
+
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
+ struct ice_prof_id prof_id;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fb8b925aaf8b..fc2b58f56279 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1235,6 +1235,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
+#define ICE_FLOW_FIND_PROF_CHK_SYMM 0x00000008
/**
* ice_flow_find_prof_conds - Find a profile matching headers and conditions
@@ -1243,13 +1244,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
* @dir: flow direction
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
* @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
*/
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
- u8 segs_cnt, u16 vsi_handle, u32 conds)
+ u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds)
{
struct ice_flow_prof *p, *prof = NULL;
@@ -1265,6 +1267,11 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
!test_bit(vsi_handle, p->vsis))
continue;
+ /* Check for symmetric settings */
+ if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) &&
+ p->symm != symm)
+ continue;
+
/* Protocol headers must be checked. Matched fields are
* checked if specified.
*/
@@ -1328,26 +1335,33 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*
* Assumption: the caller has acquired the lock to the profile list
*/
static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
- enum ice_flow_dir dir, u64 prof_id,
+ enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof)
+ bool symm, struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
+ struct ice_prof_id *ids;
int status;
+ u64 prof_id;
u8 i;
if (!prof)
return -EINVAL;
+ ids = &hw->blk[blk].prof_id;
+ prof_id = find_first_zero_bit(ids->id, ids->count);
+ if (prof_id >= ids->count)
+ return -ENOSPC;
+
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -1369,6 +1383,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
params->prof->id = prof_id;
params->prof->dir = dir;
params->prof->segs_cnt = segs_cnt;
+ params->prof->symm = symm;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
@@ -1385,7 +1400,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
- params->mask);
+ params->mask, symm);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1393,6 +1408,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
INIT_LIST_HEAD(&params->prof->entries);
mutex_init(&params->prof->entries_lock);
+ set_bit(prof_id, ids->id);
*prof = params->prof;
out:
@@ -1436,6 +1452,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
+ clear_bit(prof->id, hw->blk[blk].prof_id.id);
list_del(&prof->l_entry);
mutex_destroy(&prof->entries_lock);
devm_kfree(ice_hw_to_dev(hw), prof);
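The two hunks above replace the derived profile IDs with a driver-managed bitmap. A minimal kernel-style sketch of that allocate/free pattern (the example_* helpers are hypothetical):

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Hypothetical allocator mirroring the prof_id bookkeeping: hand out the
 * first free bit as the new profile ID and mark it used.
 */
static int example_prof_id_alloc(unsigned long *ids, unsigned int count, u64 *id)
{
	unsigned long free_id = find_first_zero_bit(ids, count);

	if (free_id >= count)
		return -ENOSPC;

	set_bit(free_id, ids);
	*id = free_id;
	return 0;
}

/* Hypothetical release path, matching the clear_bit() added above */
static void example_prof_id_free(unsigned long *ids, u64 id)
{
	clear_bit(id, ids);
}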
@@ -1511,15 +1528,15 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*/
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof)
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ bool symm, struct ice_flow_prof **prof)
{
int status;
@@ -1538,8 +1555,8 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
mutex_lock(&hw->fl_profs_locks[blk]);
- status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
- prof);
+ status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt,
+ symm, prof);
if (!status)
list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
@@ -1855,37 +1872,49 @@ int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
/**
* ice_flow_set_rss_seg_info - setup packet segments for RSS
* @segs: pointer to the flow field segment(s)
- * @hash_fields: fields to be hashed on for the segment(s)
- * @flow_hdr: protocol header fields within a packet segment
+ * @seg_cnt: segment count
+ * @cfg: configuration parameters
*
* Helper function to extract fields from hash bitmap and use flow
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
static int
-ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
- u32 flow_hdr)
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_flow_seg_info *seg;
u64 val;
- u8 i;
+ u16 i;
- for_each_set_bit(i, (unsigned long *)&hash_fields,
- ICE_FLOW_FIELD_IDX_MAX)
- ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ /* set inner most segment */
+ seg = &segs[seg_cnt - 1];
+
+ for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds,
+ (u16)ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
- ICE_FLOW_SET_HDRS(segs, flow_hdr);
+ ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
+
+ /* set outer most header */
+ if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
+ segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
+ else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
+ segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
- if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
+ if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return -EINVAL;
- val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+ val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
- val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+ val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
@@ -1956,6 +1985,39 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_get_rss_hdr_type - get an RSS profile's header type
+ * @prof: RSS flow profile
+ */
+static enum ice_rss_cfg_hdr_type
+ice_get_rss_hdr_type(struct ice_flow_prof *prof)
+{
+ if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
+ return ICE_RSS_OUTER_HEADERS;
+ } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
+ const struct ice_flow_seg_info *s;
+
+ s = &prof->segs[ICE_RSS_OUTER_HEADERS];
+ if (s->hdrs == ICE_FLOW_SEG_HDR_NONE)
+ return ICE_RSS_INNER_HEADERS;
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
+ }
+
+ return ICE_RSS_ANY_HEADERS;
+}
+
+static bool
+ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof,
+ enum ice_rss_cfg_hdr_type hdr_type)
+{
+ return (r->hash.hdr_type == hdr_type &&
+ r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs);
+}
+
+/**
* ice_rem_rss_list - remove RSS configuration from list
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
@@ -1966,15 +2028,16 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
+ enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *tmp;
/* Search for RSS hash fields associated to the VSI that match the
* hash configurations associated to the flow profile. If found
* remove from the RSS entry list of the VSI context and delete entry.
*/
+ hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
- if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
- r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ if (ice_rss_match_prof(r, prof, hdr_type)) {
clear_bit(vsi_handle, r->vsis);
if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
list_del(&r->l_entry);
@@ -1995,11 +2058,12 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
+ enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *rss_cfg;
+ hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
- if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
- r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ if (ice_rss_match_prof(r, prof, hdr_type)) {
set_bit(vsi_handle, r->vsis);
return 0;
}
@@ -2009,8 +2073,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
if (!rss_cfg)
return -ENOMEM;
- rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
- rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
+ rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->hash.hdr_type = hdr_type;
+ rss_cfg->hash.symm = prof->symm;
set_bit(vsi_handle, rss_cfg->vsis);
list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
@@ -2018,65 +2084,177 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
return 0;
}
-#define ICE_FLOW_PROF_HASH_S 0
-#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
-#define ICE_FLOW_PROF_HDR_S 32
-#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
-#define ICE_FLOW_PROF_ENCAP_S 63
-#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+/**
+ * ice_rss_config_xor_word - set the HSYMM registers for one input set word
+ * @hw: pointer to the hardware structure
+ * @prof_id: RSS hardware profile id
+ * @src: the FV index used by the protocol's source field
+ * @dst: the FV index used by the protocol's destination field
+ *
+ * Write the value of the @dst FV index into the HSYMM register slot indexed
+ * by the @src FV. This tells the hardware to XOR HSYMM[src] with INSET[dst]
+ * while calculating the RSS input set.
+ */
+static void
+ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
+{
+ u32 val, reg, bits_shift;
+ u8 reg_idx;
+
+ reg_idx = src / GLQF_HSYMM_REG_SIZE;
+ bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3);
+ val = dst | GLQF_HSYMM_ENABLE_BIT;
+
+ reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx));
+ reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift);
+ wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg);
+}
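A plain-C illustration of the byte packing performed above: each 32-bit GLQF_HSYMM register holds GLQF_HSYMM_REG_SIZE one-byte entries, so the source FV index selects both the register and the byte lane (the constants below are illustrative stand-ins, not the real register definitions):

#include <stdint.h>
#include <stdio.h>

#define REG_SIZE	4	/* one-byte entries per 32-bit register */
#define ENABLE_BIT	0x80	/* illustrative stand-in for the enable bit */

/* replace one byte lane of 'reg' with the enabled destination index */
static uint32_t pack_hsymm(uint32_t reg, uint8_t src, uint8_t dst)
{
	uint32_t shift = (src % REG_SIZE) * 8;
	uint32_t val = dst | ENABLE_BIT;

	return (reg & ~(0xffu << shift)) | (val << shift);
}

int main(void)
{
	/* src FV word 6 lands in register 6 / 4 = 1, byte lane 2 */
	printf("reg_idx=%d packed=0x%08x\n", 6 / REG_SIZE, pack_hsymm(0, 6, 9));
	return 0;
}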
-#define ICE_RSS_OUTER_HEADERS 1
-#define ICE_RSS_INNER_HEADERS 2
+/**
+ * ice_rss_config_xor - set the symmetric registers for a profile's protocol
+ * @hw: pointer to the hardware structure
+ * @prof_id: RSS hardware profile id
+ * @src: the FV index used by the protocol's source field
+ * @dst: the FV index used by the protocol's destination field
+ * @len: length of the source/destination fields in words
+ */
+static void
+ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
+{
+ int fv_last_word =
+ ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ice_rss_config_xor_word(hw, prof_id,
+ /* Yes, the field vector order in GLQF_HSYMM and
+ * GLQF_HINSET is reversed!
+ */
+ fv_last_word - (src + i),
+ fv_last_word - (dst + i));
+ ice_rss_config_xor_word(hw, prof_id,
+ fv_last_word - (dst + i),
+ fv_last_word - (src + i));
+ }
+}
-/* Flow profile ID format:
- * [0:31] - Packet match fields
- * [32:62] - Protocol header
- * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+/**
+ * ice_rss_set_symm - set the symmetric settings for an RSS profile
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ *
+ * The symmetric hash is achieved by XORing the protocol's source and
+ * destination fields, as indexed through GLQF_HSYMM and GLQF_HINSET. This
+ * function programs the profile's GLQF_HSYMM registers.
*/
-#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
- ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
- (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
- ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)))
+static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
+{
+ struct ice_prof_map *map;
+ u8 prof_id, m;
+
+ mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
+ if (map)
+ prof_id = map->prof_id;
+ mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+
+ if (!map)
+ return;
+
+ /* clear to default */
+ for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++)
+ wr32(hw, GLQF_HSYMM(prof_id, m), 0);
+
+ if (prof->symm) {
+ struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst;
+ struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst;
+ struct ice_flow_seg_xtrct *sctp_src, *sctp_dst;
+ struct ice_flow_seg_xtrct *tcp_src, *tcp_dst;
+ struct ice_flow_seg_xtrct *udp_src, *udp_dst;
+ struct ice_flow_seg_info *seg;
+
+ seg = &prof->segs[prof->segs_cnt - 1];
+
+ ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
+ ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
+
+ ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
+ ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
+
+ tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
+ tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
+
+ udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
+ udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
+
+ sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
+ sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
+
+ /* xor IPv4 */
+ if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ ipv4_src->idx, ipv4_dst->idx, 2);
+
+ /* xor IPv6 */
+ if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ ipv6_src->idx, ipv6_dst->idx, 8);
+
+ /* xor TCP */
+ if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ tcp_src->idx, tcp_dst->idx, 1);
+
+ /* xor UDP */
+ if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ udp_src->idx, udp_dst->idx, 1);
+
+ /* xor SCTP */
+ if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ sctp_src->idx, sctp_dst->idx, 1);
+ }
+}
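A toy demonstration (userspace C, illustrative addresses and ports) of why the XOR configuration above yields a symmetric hash: with source and destination fields XORed into the input set, both directions of a flow produce an identical hash input.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t saddr = 0xc0a80101, daddr = 0xc0a80102;
	uint16_t sport = 12345, dport = 443;

	/* XOR is commutative, so forward and reverse directions agree */
	uint32_t fwd_ip = saddr ^ daddr, rev_ip = daddr ^ saddr;
	uint16_t fwd_l4 = sport ^ dport, rev_l4 = dport ^ sport;

	printf("ip fields match: %d, l4 fields match: %d\n",
	       fwd_ip == rev_ip, fwd_l4 == rev_l4);
	return 0;
}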
/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
- * @addl_hdrs: protocol header fields
- * @segs_cnt: packet segment count
+ * @cfg: configuration parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
-ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs, u8 segs_cnt)
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
+ u8 segs_cnt;
int status;
- if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
- return -EINVAL;
+ segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
+ ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
- addl_hdrs);
+ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto exit;
- /* Search for a flow profile that has matching headers, hash fields
- * and has the input VSI associated to it. If found, no further
+ /* Search for a flow profile that has matching headers, hash fields,
+ * symm and has the input VSI associated to it. If found, no further
* operations required and exit.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS |
+ ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof)
goto exit;
@@ -2087,7 +2265,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* the protocol header and new hash field configuration.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+ cfg->symm, vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof) {
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
if (!status)
@@ -2103,11 +2282,12 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
}
}
- /* Search for a profile that has same match fields only. If this
- * exists then associate the VSI to this profile.
+ /* Search for a profile that has the same match fields and symmetric
+ * setting. If this exists then associate the VSI to this profile.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (prof) {
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
@@ -2116,17 +2296,14 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
goto exit;
}
- /* Create a new flow profile with generated profile and packet
- * segment information.
- */
+ /* Create a new flow profile with packet segment information. */
status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
- ICE_FLOW_GEN_PROFID(hashed_flds,
- segs[segs_cnt - 1].hdrs,
- segs_cnt),
- segs, segs_cnt, &prof);
+ segs, segs_cnt, cfg->symm, &prof);
if (status)
goto exit;
+ prof->symm = cfg->symm;
+ ice_rss_set_symm(hw, prof);
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
/* If association to a new flow profile failed then this profile can
* be removed.
@@ -2146,30 +2323,43 @@ exit:
/**
* ice_add_rss_cfg - add an RSS configuration with specified hashed fields
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
- * @addl_hdrs: protocol header fields
+ * @vsi: VSI to add the RSS configuration to
+ * @cfg: configure parameters
*
* This function will generate a flow profile based on fields associated with
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
int
-ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_rss_hash_cfg local_cfg;
+ u16 vsi_handle;
int status;
- if (hashed_flds == ICE_HASH_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!vsi)
+ return -EINVAL;
+
+ vsi_handle = vsi->idx;
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
- ICE_RSS_OUTER_HEADERS);
- if (!status)
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
- addl_hdrs, ICE_RSS_INNER_HEADERS);
+ local_cfg = *cfg;
+ if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ } else {
+ local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ if (!status) {
+ local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ &local_cfg);
+ }
+ }
mutex_unlock(&hw->rss_locks);
return status;
@@ -2179,33 +2369,33 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* ice_rem_rss_cfg_sync - remove an existing RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
- * @addl_hdrs: Protocol header fields within a packet segment
- * @segs_cnt: packet segment count
+ * @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
-ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs, u8 segs_cnt)
+ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
+ u8 segs_cnt;
int status;
+ segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
+ ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
- addl_hdrs);
+ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto out;
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
status = -ENOENT;
@@ -2233,31 +2423,39 @@ out:
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
- * @addl_hdrs: Protocol header fields within a packet segment
+ * @cfg: configure parameters
*
* This function will lookup the flow profile based on the input
* hash field bitmap, iterate through the profile entry list of
* that profile and find entry associated with input VSI to be
- * removed. Calls are made to underlying flow s which will APIs
+ * removed. Calls are made to underlying flow apis which will in
* turn build or update buffers for RSS XLT1 section.
*/
-int __maybe_unused
-ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+int
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_rss_hash_cfg local_cfg;
int status;
- if (hashed_flds == ICE_HASH_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
- ICE_RSS_OUTER_HEADERS);
- if (!status)
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
- addl_hdrs, ICE_RSS_INNER_HEADERS);
+ local_cfg = *cfg;
+ if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ } else {
+ local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ if (!status) {
+ local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle,
+ &local_cfg);
+ }
+ }
mutex_unlock(&hw->rss_locks);
return status;
@@ -2298,18 +2496,24 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
+ * @vsi: VF's VSI
* @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
{
+ struct ice_rss_hash_cfg hcfg;
+ u16 vsi_handle;
int status = 0;
u64 hash_flds;
+ if (!vsi)
+ return -EINVAL;
+
+ vsi_handle = vsi->idx;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
@@ -2379,8 +2583,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
if (rss_hash == ICE_HASH_INVALID)
return -EIO;
- status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
- ICE_FLOW_SEG_HDR_NONE);
+ hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ hcfg.hash_flds = rss_hash;
+ hcfg.hdr_type = ICE_RSS_ANY_HEADERS;
+ hcfg.symm = false;
+ status = ice_add_rss_cfg(hw, vsi, &hcfg);
if (status)
break;
}
@@ -2388,6 +2595,54 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
return status;
}
+static bool rss_cfg_symm_valid(u64 hfld)
+{
+ return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)));
+}
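A stand-alone toy illustration of the rule rss_cfg_symm_valid() enforces, using placeholder bit positions rather than the driver's ICE_FLOW_HASH_FLD_* values: a hash-field set qualifies for symmetric RSS only if the source/destination selections come in pairs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_FLD_IPV4_SA (1ULL << 0)      /* placeholder bits, illustration only */
#define EX_FLD_IPV4_DA (1ULL << 1)

static bool ex_symm_valid(uint64_t hfld)
{
        /* valid only if SA and DA are both selected or both absent */
        return !(!!(hfld & EX_FLD_IPV4_SA) ^ !!(hfld & EX_FLD_IPV4_DA));
}

int main(void)
{
        printf("%d\n", ex_symm_valid(EX_FLD_IPV4_SA | EX_FLD_IPV4_DA)); /* 1 */
        printf("%d\n", ex_symm_valid(EX_FLD_IPV4_SA));                  /* 0 */
        return 0;
}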
+
+/**
+ * ice_set_rss_cfg_symm - set symmetry for all of the VSI's RSS configurations
+ * @hw: pointer to the hardware structure
+ * @vsi: VSI to set/unset Symmetric RSS
+ * @symm: TRUE to set Symmetric RSS hashing
+ */
+int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm)
+{
+ struct ice_rss_hash_cfg local;
+ struct ice_rss_cfg *r, *tmp;
+ u16 vsi_handle = vsi->idx;
+ int status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) {
+ if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) {
+ local = r->hash;
+ local.symm = symm;
+ if (symm && !rss_cfg_symm_valid(r->hash.hash_flds))
+ continue;
+
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
/**
* ice_replay_rss_cfg - replay RSS configurations associated with VSI
* @hw: pointer to the hardware structure
@@ -2404,16 +2659,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
if (test_bit(vsi_handle, r->vsis)) {
- status = ice_add_rss_cfg_sync(hw, vsi_handle,
- r->hashed_flds,
- r->packet_hdr,
- ICE_RSS_OUTER_HEADERS);
- if (status)
- break;
- status = ice_add_rss_cfg_sync(hw, vsi_handle,
- r->hashed_flds,
- r->packet_hdr,
- ICE_RSS_INNER_HEADERS);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
if (status)
break;
}
@@ -2428,11 +2674,12 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hdrs: protocol header type
+ * @symm: whether the RSS is symmetric (bool, output)
*
* This function will return the match fields of the first instance of flow
* profile having the given header types and containing input VSI
*/
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm)
{
u64 rss_hash = ICE_HASH_INVALID;
struct ice_rss_cfg *r;
@@ -2444,8 +2691,9 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
- r->packet_hdr == hdrs) {
- rss_hash = r->hashed_flds;
+ r->hash.addl_hdrs == hdrs) {
+ rss_hash = r->hash.hash_flds;
+ *symm = r->hash.symm;
break;
}
mutex_unlock(&hw->rss_locks);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 96923ef0a5a8..ff82915ab497 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -34,6 +34,8 @@
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_FLOW_HASH_GTP_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
@@ -227,6 +229,19 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_MAX
};
+#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
+#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
+#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
+#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
+#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
@@ -279,6 +294,24 @@ enum ice_flow_avf_hdr_field {
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+enum ice_rss_cfg_hdr_type {
+ ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
+ ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
+ /* take inner headers as inputset for packet with outer ipv4. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
+ /* take inner headers as inputset for packet with outer ipv6. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
+ /* take outer headers first then inner headers as inputset */
+ ICE_RSS_ANY_HEADERS
+};
+
+struct ice_rss_hash_cfg {
+ u32 addl_hdrs; /* protocol header fields */
+ u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
+ enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
+ bool symm; /* symmetric or asymmetric hash */
+};
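A hedged usage sketch of the new structure, mirroring how ice_add_avf_rss_cfg() fills it earlier in this patch: request a symmetric TCP-over-IPv4 hash on outer headers. ICE_FLOW_SEG_HDR_IPV4 and ICE_FLOW_SEG_HDR_TCP are pre-existing ice_flow.h header flags; hw and vsi are assumed to be provided by the caller, and error handling is elided.

struct ice_rss_hash_cfg cfg = {
        .addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
        .hash_flds = ICE_HASH_TCP_IPV4,
        .hdr_type = ICE_RSS_OUTER_HEADERS,
        .symm = true,
};
int err = ice_add_rss_cfg(hw, vsi, &cfg);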
+
enum ice_flow_dir {
ICE_FLOW_RX = 0x02,
};
@@ -289,8 +322,10 @@ enum ice_flow_priority {
ICE_FLOW_PRIO_HIGH
};
+#define ICE_FLOW_SEG_SINGLE 1
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_SEG_RAW_FLD_MAX 2
+#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
@@ -372,20 +407,21 @@ struct ice_flow_prof {
/* software VSI handles referenced by this flow profile */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+
+ bool symm; /* Symmetric Hash for RSS */
};
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
- u64 hashed_flds;
- u32 packet_hdr;
+ struct ice_rss_hash_cfg hash;
};
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof);
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
@@ -401,13 +437,13 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm);
+int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ u64 hashed_flds);
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-int
-ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs);
-int
-ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs);
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
+int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ const struct ice_rss_hash_cfg *cfg);
+int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
#endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
new file mode 100644
index 000000000000..92b5dac481cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/vmalloc.h>
+#include "ice.h"
+#include "ice_common.h"
+#include "ice_fwlog.h"
+
+bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
+{
+ u16 head, tail;
+
+ head = rings->head;
+ tail = rings->tail;
+
+ if (head < tail && (tail - head == (rings->size - 1)))
+ return true;
+ else if (head > tail && (tail == (head - 1)))
+ return true;
+
+ return false;
+}
+
+bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
+{
+ return rings->head == rings->tail;
+}
+
+void ice_fwlog_ring_increment(u16 *item, u16 size)
+{
+ *item = (*item + 1) & (size - 1);
+}
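The masked increment relies on the ring size being a power of two (ICE_FWLOG_RING_SIZE_DFLT is 256); a quick userspace check of the wrap-around with a toy size of 8:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t idx = 0, size = 8;     /* toy size; must be a power of two */
        int i;

        for (i = 0; i < 10; i++) {
                printf("%u ", idx);
                idx = (idx + 1) & (size - 1);   /* same wrap as ice_fwlog_ring_increment() */
        }
        printf("\n");                   /* prints: 0 1 2 3 4 5 6 7 0 1 */
        return 0;
}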
+
+static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
+{
+ int i, nr_bytes;
+ u8 *mem;
+
+ nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
+ mem = vzalloc(nr_bytes);
+ if (!mem)
+ return -ENOMEM;
+
+ for (i = 0; i < rings->size; i++) {
+ struct ice_fwlog_data *ring = &rings->rings[i];
+
+ ring->data_size = ICE_AQ_MAX_BUF_LEN;
+ ring->data = mem;
+ mem += ICE_AQ_MAX_BUF_LEN;
+ }
+
+ return 0;
+}
+
+static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
+{
+ int i;
+
+ for (i = 0; i < rings->size; i++) {
+ struct ice_fwlog_data *ring = &rings->rings[i];
+
+ /* the first ring is the base memory for the whole range so
+ * free it
+ */
+ if (!i)
+ vfree(ring->data);
+
+ ring->data = NULL;
+ ring->data_size = 0;
+ }
+}
+
+#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
+/**
+ * ice_fwlog_realloc_rings - reallocate the FW log rings
+ * @hw: pointer to the HW structure
+ * @index: the new index to use to allocate memory for the log data
+ *
+ */
+void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
+{
+ struct ice_fwlog_ring ring;
+ int status, ring_size;
+
+ /* convert the number of bytes into a number of 4K buffers. externally
+ * the driver presents the interface to the FW log data as a number of
+ * bytes because that's easy for users to understand. internally the
+ * driver uses a ring of buffers because the driver doesn't know where
+ * the beginning and end of any line of log data is so the driver has
+ * to overwrite data as complete blocks. when the data is returned to
+ * the user the driver knows that the data is correct and the FW log
+ * can be correctly parsed by the tools
+ */
+ ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
+ if (ring_size == hw->fwlog_ring.size)
+ return;
+
+ /* allocate space for the new rings and buffers then release the
+ * old rings and buffers. that way if we don't have enough
+ * memory then we at least have what we had before
+ */
+ ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
+ if (!ring.rings)
+ return;
+
+ ring.size = ring_size;
+
+ status = ice_fwlog_alloc_ring_buffs(&ring);
+ if (status) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
+ ice_fwlog_free_ring_buffs(&ring);
+ kfree(ring.rings);
+ return;
+ }
+
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+
+ hw->fwlog_ring.rings = ring.rings;
+ hw->fwlog_ring.size = ring.size;
+ hw->fwlog_ring.index = index;
+ hw->fwlog_ring.head = 0;
+ hw->fwlog_ring.tail = 0;
+}
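A quick check of the index-to-buffer-count conversion used above, assuming ICE_AQ_MAX_BUF_LEN is the existing 4096-byte control queue buffer size:

#include <stdio.h>

#define EX_ICE_AQ_MAX_BUF_LEN           4096
#define ICE_FWLOG_INDEX_TO_BYTES(n)     ((128 * 1024) << (n))

int main(void)
{
        int index;

        for (index = 0; index <= 3; index++)
                printf("index %d -> %d buffers\n", index,
                       ICE_FWLOG_INDEX_TO_BYTES(index) / EX_ICE_AQ_MAX_BUF_LEN);
        /* index 0 -> 32, 1 -> 64, 2 -> 128, 3 -> 256; index 3 and 256 match
         * ICE_FWLOG_RING_SIZE_INDEX_DFLT and ICE_FWLOG_RING_SIZE_DFLT
         */
        return 0;
}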
+
+/**
+ * ice_fwlog_init - Initialize FW logging configuration
+ * @hw: pointer to the HW structure
+ *
+ * This function should be called on driver initialization during
+ * ice_init_hw().
+ */
+int ice_fwlog_init(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ ice_fwlog_set_supported(hw);
+
+ if (ice_fwlog_supported(hw)) {
+ int status;
+
+ /* read the current config from the FW and store it */
+ status = ice_fwlog_get(hw, &hw->fwlog_cfg);
+ if (status)
+ return status;
+
+ hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
+ sizeof(*hw->fwlog_ring.rings),
+ GFP_KERNEL);
+ if (!hw->fwlog_ring.rings) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
+ return -ENOMEM;
+ }
+
+ hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
+ hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
+
+ status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
+ if (status) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+ return status;
+ }
+
+ ice_debugfs_fwlog_init(hw->back);
+ } else {
+ dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fwlog_deinit - unroll FW logging configuration
+ * @hw: pointer to the HW structure
+ *
+ * This function should be called in ice_deinit_hw().
+ */
+void ice_fwlog_deinit(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ int status;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
+ status = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ if (status)
+ dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
+ status);
+
+ kfree(pf->ice_debugfs_pf_fwlog_modules);
+
+ pf->ice_debugfs_pf_fwlog_modules = NULL;
+
+ status = ice_fwlog_unregister(hw);
+ if (status)
+ dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
+ status);
+
+ if (hw->fwlog_ring.rings) {
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+ }
+}
+
+/**
+ * ice_fwlog_supported - report whether the FW supports FW logging (cached)
+ * @hw: pointer to the HW structure
+ *
+ * This will always return false if called before ice_init_hw(), so it must be
+ * called after ice_init_hw().
+ */
+bool ice_fwlog_supported(struct ice_hw *hw)
+{
+ return hw->fwlog_supported;
+}
+
+/**
+ * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
+ * @hw: pointer to the HW structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from ice_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ */
+static int
+ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
+ u16 num_entries, u16 options, u16 log_resolution)
+{
+ struct ice_aqc_fw_log_cfg_resp *fw_modules;
+ struct ice_aqc_fw_log *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ int i;
+
+ fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
+ if (!fw_modules)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ cpu_to_le16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
+
+ if (options & ICE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
+ if (options & ICE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
+
+ status = ice_aq_send_cmd(hw, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries,
+ NULL);
+
+ kfree(fw_modules);
+
+ return status;
+}
+
+/**
+ * ice_fwlog_set - Set the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config used to set firmware logging
+ *
+ * This function should be called whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
+ * for init.
+ */
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ return ice_aq_fwlog_set(hw, cfg->module_entries,
+ ICE_AQC_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+}
+
+/**
+ * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
+ * @hw: pointer to the HW structure
+ * @cfg: firmware logging configuration to populate
+ */
+static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ struct ice_aqc_fw_log_cfg_resp *fw_modules;
+ struct ice_aqc_fw_log *cmd;
+ struct ice_aq_desc desc;
+ u16 module_id_cnt;
+ int status;
+ void *buf;
+ int i;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
+
+ status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
+ goto status_out;
+ }
+
+ module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
+ } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
+ ICE_AQC_FW_LOG_ID_MAX);
+ module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
+ cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
+ cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
+
+ for (i = 0; i < module_id_cnt; i++) {
+ struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ le16_to_cpu(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_fwlog_get - Get the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config to populate based on current firmware logging settings
+ */
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ return ice_aq_fwlog_get(hw, cfg);
+}
+
+/**
+ * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
+ * @hw: pointer to the HW structure
+ * @reg: true to register and false to unregister
+ */
+static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
+
+ if (reg)
+ desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_fwlog_register - Register the PF for firmware logging
+ * @hw: pointer to the HW structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in ice_fwlog_set.
+ */
+int ice_fwlog_register(struct ice_hw *hw)
+{
+ int status;
+
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ status = ice_aq_fwlog_register(hw, true);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
+ else
+ hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
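A hedged caller-side sketch (not part of this patch) tying the pieces together: request verbose logging for every module, enable delivery over the admin receive queue, then register the PF for the events. The log_resolution value here is an arbitrary example.

static int example_enable_fwlog(struct ice_hw *hw)
{
        struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
        int i, err;

        for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
                cfg->module_entries[i].module_id = i;
                cfg->module_entries[i].log_level = ICE_FWLOG_LEVEL_VERBOSE;
        }
        cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
        cfg->log_resolution = 10;       /* events per ARQ message (example value) */

        err = ice_fwlog_set(hw, cfg);
        if (err)
                return err;

        return ice_fwlog_register(hw);
}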
+
+/**
+ * ice_fwlog_unregister - Unregister the PF from firmware logging
+ * @hw: pointer to the HW structure
+ */
+int ice_fwlog_unregister(struct ice_hw *hw)
+{
+ int status;
+
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ status = ice_aq_fwlog_register(hw, false);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
+ else
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ice_fwlog_set_supported - Set if FW logging is supported by FW
+ * @hw: pointer to the HW struct
+ *
+ * If FW returns success to the ice_aq_fwlog_get call then it supports FW
+ * logging, else it doesn't. Set the fwlog_supported flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine if
+ * the FW supports FW logging.
+ */
+void ice_fwlog_set_supported(struct ice_hw *hw)
+{
+ struct ice_fwlog_cfg *cfg;
+ int status;
+
+ hw->fwlog_supported = false;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ /* don't call ice_fwlog_get() because that would check to see if FW
+ * logging is supported which is what the driver is determining now
+ */
+ status = ice_aq_fwlog_get(hw, cfg);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
+ status);
+ else
+ hw->fwlog_supported = true;
+
+ kfree(cfg);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
new file mode 100644
index 000000000000..287e71fa4b86
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_FWLOG_H_
+#define _ICE_FWLOG_H_
+#include "ice_adminq_cmd.h"
+
+struct ice_hw;
+
+/* Only a single log level should be set and all log levels under the set value
+ * are enabled, e.g. if log level is set to ICE_FWLOG_LEVEL_VERBOSE, then all
+ * other log levels are included (except ICE_FWLOG_LEVEL_NONE)
+ */
+enum ice_fwlog_level {
+ ICE_FWLOG_LEVEL_NONE = 0,
+ ICE_FWLOG_LEVEL_ERROR = 1,
+ ICE_FWLOG_LEVEL_WARNING = 2,
+ ICE_FWLOG_LEVEL_NORMAL = 3,
+ ICE_FWLOG_LEVEL_VERBOSE = 4,
+ ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
+
+struct ice_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ice_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
+ /* options used to configure firmware logging */
+ u16 options;
+#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ice_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ice_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u16 log_resolution;
+};
+
+struct ice_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ice_fwlog_ring {
+ struct ice_fwlog_data *rings;
+ u16 index;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
+#define ICE_FWLOG_RING_SIZE_DFLT 256
+#define ICE_FWLOG_RING_SIZE_MAX 512
+
+bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
+bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
+void ice_fwlog_ring_increment(u16 *item, u16 size);
+void ice_fwlog_set_supported(struct ice_hw *hw);
+bool ice_fwlog_supported(struct ice_hw *hw);
+int ice_fwlog_init(struct ice_hw *hw);
+void ice_fwlog_deinit(struct ice_hw *hw);
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_register(struct ice_hw *hw);
+int ice_fwlog_unregister(struct ice_hw *hw);
+void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
+#endif /* _ICE_FWLOG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 86936b758ade..cfac1d432c15 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -200,6 +200,8 @@
#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
#define GLINT_VECT2FUNC_IS_PF_S 16
#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
+#define PFINT_ALLOC 0x001D2600
+#define PFINT_ALLOC_FIRST ICE_M(0x7FF, 0)
#define PFINT_FW_CTL 0x0016C800
#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_FW_CTL_ITR_INDX_S 11
@@ -404,6 +406,10 @@
#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
#define GLQF_HMASK_SEL_MAX_INDEX 127
#define GLQF_HMASK_SEL_MASK_SEL_S 0
+#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_HSYMM_REG_SIZE 4
+#define GLQF_HSYMM_REG_PER_PROF 6
+#define GLQF_HSYMM_ENABLE_BIT BIT(7)
#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
new file mode 100644
index 000000000000..e4c2c1bff6c0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_hwmon.h"
+#include "ice_adminq_cmd.h"
+
+#include <linux/hwmon.h>
+
+#define TEMP_FROM_REG(reg) ((reg) * 1000)
+
+static const struct hwmon_channel_info *ice_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_EMERGENCY),
+ NULL
+};
+
+static int ice_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct ice_aqc_get_sensor_reading_resp resp;
+ struct ice_pf *pf = dev_get_drvdata(dev);
+ int ret;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ ret = ice_aq_get_sensor_reading(&pf->hw, &resp);
+ if (ret) {
+ dev_warn_ratelimited(dev,
+ "%s HW read failure (%d)\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp);
+ break;
+ case hwmon_temp_max:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_warning_threshold);
+ break;
+ case hwmon_temp_crit:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_critical_threshold);
+ break;
+ case hwmon_temp_emergency:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_fatal_threshold);
+ break;
+ default:
+ dev_dbg(dev, "%s unsupported attribute (%d)\n",
+ __func__, attr);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
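These attributes surface through the standard hwmon temp1_input/temp1_max/temp1_crit/temp1_emergency sysfs files, which report millidegrees Celsius; hence the TEMP_FROM_REG() scaling of the sensor's whole-degree reading. A trivial stand-alone check of that scaling (macro reproduced for the example):

#include <stdio.h>

#define TEMP_FROM_REG(reg) ((reg) * 1000)       /* degrees C -> millidegrees C */

int main(void)
{
        printf("%d\n", TEMP_FROM_REG(47));      /* 47000, i.e. 47.0 degrees C */
        return 0;
}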
+
+static umode_t ice_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit:
+ case hwmon_temp_max:
+ case hwmon_temp_emergency:
+ return 0444;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops ice_hwmon_ops = {
+ .is_visible = ice_hwmon_is_visible,
+ .read = ice_hwmon_read
+};
+
+static const struct hwmon_chip_info ice_chip_info = {
+ .ops = &ice_hwmon_ops,
+ .info = ice_hwmon_info
+};
+
+static bool ice_is_internal_reading_supported(struct ice_pf *pf)
+{
+ /* Only the first PF will report temperature for a chip.
+ * Note that internal temp reading is not supported
+ * for older FW (< v4.30).
+ */
+ if (pf->hw.pf_id)
+ return false;
+
+ unsigned long sensors = pf->hw.dev_caps.supported_sensors;
+
+ return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+}
+
+void ice_hwmon_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct device *hdev;
+
+ if (!ice_is_internal_reading_supported(pf))
+ return;
+
+ hdev = hwmon_device_register_with_info(dev, "ice", pf, &ice_chip_info,
+ NULL);
+ if (IS_ERR(hdev)) {
+ dev_warn(dev,
+ "hwmon_device_register_with_info returns error (%ld)",
+ PTR_ERR(hdev));
+ return;
+ }
+ pf->hwmon_dev = hdev;
+}
+
+void ice_hwmon_exit(struct ice_pf *pf)
+{
+ if (!pf->hwmon_dev)
+ return;
+ hwmon_device_unregister(pf->hwmon_dev);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.h b/drivers/net/ethernet/intel/ice/ice_hwmon.h
new file mode 100644
index 000000000000..d66d40354f5a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023, Intel Corporation. */
+
+#ifndef _ICE_HWMON_H_
+#define _ICE_HWMON_H_
+
+#ifdef CONFIG_ICE_HWMON
+void ice_hwmon_init(struct ice_pf *pf);
+void ice_hwmon_exit(struct ice_pf *pf);
+#else /* CONFIG_ICE_HWMON */
+static inline void ice_hwmon_init(struct ice_pf *pf) { }
+static inline void ice_hwmon_exit(struct ice_pf *pf) { }
+#endif /* CONFIG_ICE_HWMON */
+
+#endif /* _ICE_HWMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index b47cd43ae871..467372d541d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
}
/**
+ * ice_pkg_has_lport_extract - check if lport extraction supported
+ * @hw: HW struct
+ */
+static bool ice_pkg_has_lport_extract(struct ice_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) {
+ u16 offset;
+ u8 fv_prot;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i,
+ &fv_prot, &offset);
+ if (fv_prot == ICE_FV_PROT_MDID &&
+ offset == ICE_LP_EXT_BUF_OFFSET)
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_lag_find_primary - returns pointer to primary interfaces lag struct
* @lag: local interfaces lag struct
*/
@@ -208,8 +229,7 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
eth_hdr = s_rule->hdr_data;
ice_fill_eth_hdr(eth_hdr);
- act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num);
s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
s_rule->recipe_id = cpu_to_le16(recipe_id);
@@ -754,9 +774,7 @@ ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
ICE_SINGLE_ACT_LAN_ENABLE |
ICE_SINGLE_ACT_VALID_BIT |
- ((vsi->vsi_num <<
- ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M));
+ FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
opc = ice_aqc_opc_add_sw_rules;
@@ -1209,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
}
/**
- * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG
* @pf: PF struct
*/
static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
@@ -1222,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
else
ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
- if (caps->sriov_lag)
+ if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw))
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
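The two conversions in this hunk replace an open-coded shift-and-mask with FIELD_PREP(); a toy userspace equivalent (using an example mask rather than the driver's ICE_SINGLE_ACT_VSI_ID_M) showing both forms yield the same encoding:

#include <stdint.h>
#include <stdio.h>

#define EX_MASK         0x0007fff8u     /* example field mask */
#define EX_SHIFT        3               /* position of the mask's lowest set bit */

/* simplified stand-in for the kernel's FIELD_PREP(), which derives the
 * shift from the mask at compile time */
#define EX_FIELD_PREP(mask, val) (((uint32_t)(val) << EX_SHIFT) & (mask))

int main(void)
{
        uint32_t vsi_num = 42;

        printf("0x%08x\n", (vsi_num << EX_SHIFT) & EX_MASK);    /* open-coded form */
        printf("0x%08x\n", EX_FIELD_PREP(EX_MASK, vsi_num));    /* FIELD_PREP form */
        return 0;
}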
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index ede833dfa658..183b38792ef2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -17,6 +17,9 @@ enum ice_lag_role {
#define ICE_LAG_INVALID_PORT 0xFF
#define ICE_LAG_RESET_RETRIES 5
+#define ICE_SW_DEFAULT_PROFILE 0
+#define ICE_FV_PROT_MDID 255
+#define ICE_LP_EXT_BUF_OFFSET 32
struct ice_pf;
struct ice_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 89f986a75cc8..d384ddfcb83e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -673,6 +673,212 @@ struct ice_tlan_ctx {
* Use the enum ice_rx_l2_ptype to decode the packet type
* ENDIF
*/
+#define ICE_PTYPES \
+ /* L2 Packet types */ \
+ ICE_PTT_UNUSED_ENTRY(0), \
+ ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \
+ ICE_PTT_UNUSED_ENTRY(2), \
+ ICE_PTT_UNUSED_ENTRY(3), \
+ ICE_PTT_UNUSED_ENTRY(4), \
+ ICE_PTT_UNUSED_ENTRY(5), \
+ ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT_UNUSED_ENTRY(8), \
+ ICE_PTT_UNUSED_ENTRY(9), \
+ ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT_UNUSED_ENTRY(12), \
+ ICE_PTT_UNUSED_ENTRY(13), \
+ ICE_PTT_UNUSED_ENTRY(14), \
+ ICE_PTT_UNUSED_ENTRY(15), \
+ ICE_PTT_UNUSED_ENTRY(16), \
+ ICE_PTT_UNUSED_ENTRY(17), \
+ ICE_PTT_UNUSED_ENTRY(18), \
+ ICE_PTT_UNUSED_ENTRY(19), \
+ ICE_PTT_UNUSED_ENTRY(20), \
+ ICE_PTT_UNUSED_ENTRY(21), \
+ \
+ /* Non Tunneled IPv4 */ \
+ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(25), \
+ ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \
+ ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \
+ ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> IPv4 */ \
+ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(32), \
+ ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> IPv6 */ \
+ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(39), \
+ ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT */ \
+ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 --> GRE/NAT --> IPv4 */ \
+ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(47), \
+ ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> IPv6 */ \
+ ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(54), \
+ ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC */ \
+ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \
+ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(62), \
+ ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \
+ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(69), \
+ ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */ \
+ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \
+ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(77), \
+ ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \
+ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(84), \
+ ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* Non Tunneled IPv6 */ \
+ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(91), \
+ ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \
+ ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \
+ ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> IPv4 */ \
+ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(98), \
+ ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> IPv6 */ \
+ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(105), \
+ ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT */ \
+ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> IPv4 */ \
+ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(113), \
+ ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> IPv6 */ \
+ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(120), \
+ ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC */ \
+ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \
+ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(128), \
+ ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \
+ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(135), \
+ ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */ \
+ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \
+ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(143), \
+ ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \
+ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(150), \
+ ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+#define ICE_NUM_DEFINED_PTYPES 154
/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
@@ -695,212 +901,10 @@ struct ice_tlan_ctx {
/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
- /* L2 Packet types */
- ICE_PTT_UNUSED_ENTRY(0),
- ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- ICE_PTT_UNUSED_ENTRY(2),
- ICE_PTT_UNUSED_ENTRY(3),
- ICE_PTT_UNUSED_ENTRY(4),
- ICE_PTT_UNUSED_ENTRY(5),
- ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT_UNUSED_ENTRY(8),
- ICE_PTT_UNUSED_ENTRY(9),
- ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT_UNUSED_ENTRY(12),
- ICE_PTT_UNUSED_ENTRY(13),
- ICE_PTT_UNUSED_ENTRY(14),
- ICE_PTT_UNUSED_ENTRY(15),
- ICE_PTT_UNUSED_ENTRY(16),
- ICE_PTT_UNUSED_ENTRY(17),
- ICE_PTT_UNUSED_ENTRY(18),
- ICE_PTT_UNUSED_ENTRY(19),
- ICE_PTT_UNUSED_ENTRY(20),
- ICE_PTT_UNUSED_ENTRY(21),
-
- /* Non Tunneled IPv4 */
- ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(25),
- ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(32),
- ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(39),
- ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(47),
- ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(54),
- ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(62),
- ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(69),
- ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(77),
- ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(84),
- ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(91),
- ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(98),
- ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(105),
- ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(113),
- ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(120),
- ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(128),
- ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(135),
- ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(143),
- ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(150),
- ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+ ICE_PTYPES
/* unused entries */
- [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1bad6e17f9be..fc23dbe302b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -212,11 +212,18 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq));
break;
case ICE_VSI_SWITCHDEV_CTRL:
- /* The number of queues for ctrl VSI is equal to number of VFs.
+ /* The number of queues for ctrl VSI is equal to number of PRs
* Each ring is associated to the corresponding VF_PR netdev.
+ * Tx and Rx rings are always equal
*/
- vsi->alloc_txq = ice_get_num_vfs(pf);
- vsi->alloc_rxq = vsi->alloc_txq;
+ if (vsi->req_txq && vsi->req_rxq) {
+ vsi->alloc_txq = vsi->req_txq;
+ vsi->alloc_rxq = vsi->req_rxq;
+ } else {
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ }
+
vsi->num_q_vectors = 1;
break;
case ICE_VSI_VF:
@@ -519,16 +526,14 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
struct ice_pf *pf = q_vector->vsi->back;
- struct ice_vf *vf;
- unsigned int bkt;
+ struct ice_repr *repr;
+ unsigned long id;
if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
- rcu_read_lock();
- ice_for_each_vf_rcu(pf, bkt, vf)
- napi_schedule(&vf->repr->q_vector->napi);
- rcu_read_unlock();
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ napi_schedule(&repr->q_vector->napi);
return IRQ_HANDLED;
}
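
The hunk above drops the RCU walk over VFs in favour of iterating the eswitch's xarray of port representors. A rough sketch of the xa_for_each() pattern follows; the names here are hypothetical, not the driver's:

#include <linux/xarray.h>
#include <linux/netdevice.h>

struct demo_repr {
	struct napi_struct napi;	/* per-representor NAPI context */
};

/* Schedule NAPI for every representor stored in the xarray. xa_for_each()
 * handles the RCU locking for each lookup internally, so no explicit
 * rcu_read_lock() is needed around the loop.
 */
static void demo_kick_reprs(struct xarray *reprs)
{
	struct demo_repr *repr;
	unsigned long id;

	xa_for_each(reprs, id, repr)
		napi_schedule(&repr->napi);
}
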
@@ -969,9 +974,8 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
/* allow all untagged/tagged packets by default on Tx */
- ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
- ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
- ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
+ ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
* results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
*
@@ -979,15 +983,14 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
*/
if (ice_is_dvm_ena(hw)) {
ctxt->info.inner_vlan_flags |=
- ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+ FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
ctxt->info.outer_vlan_flags =
- (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
ctxt->info.outer_vlan_flags |=
- (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
- ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
+ ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
ctxt->info.outer_vlan_flags |=
FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
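
The conversions above swap open-coded shift-and-mask arithmetic for FIELD_PREP() from <linux/bitfield.h>, which derives the shift from the mask at compile time. A minimal sketch with a made-up field definition:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_FIELD_M	GENMASK(5, 3)	/* hypothetical 3-bit field at bits 5:3 */

static u32 demo_pack_field(u32 regval, u32 field)
{
	/* Equivalent to the old "(field << DEMO_FIELD_S) & DEMO_FIELD_M",
	 * but without needing a separate shift constant.
	 */
	regval &= ~DEMO_FIELD_M;
	regval |= FIELD_PREP(DEMO_FIELD_M, field);
	return regval;
}
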
@@ -1066,10 +1069,8 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
- qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
- ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
offset += num_rxq_per_tc;
tx_count += num_txq_per_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
@@ -1152,18 +1153,14 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ctxt->info.max_fd_fltr_shared =
cpu_to_le16(vsi->num_bfltr);
/* default queue index within the VSI of the default FD */
- val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
- ICE_AQ_VSI_FD_DEF_Q_M);
+ val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q);
/* target queue or queue group to the FD filter */
- val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
- ICE_AQ_VSI_FD_DEF_GRP_M);
+ val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group);
ctxt->info.fd_def_q = cpu_to_le16(val);
/* queue index on which FD filter completion is reported */
- val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
- ICE_AQ_VSI_FD_REPORT_Q_M);
+ val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q);
/* priority of the default qindex action */
- val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
- ICE_AQ_VSI_FD_DEF_PRIORITY_M);
+ val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio);
ctxt->info.fd_report_opt = cpu_to_le16(val);
}
@@ -1186,12 +1183,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_VF:
/* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
default:
dev_dbg(dev, "Unsupported VSI type %s\n",
@@ -1199,9 +1194,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
return;
}
- ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ vsi->rss_hfunc = hash_type;
+
+ ctxt->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
}
static void
@@ -1215,10 +1213,8 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
pow = order_base_2(qcount);
- qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
- ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
@@ -1600,12 +1596,44 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
}
+static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
+ /* configure RSS for IPv4 with input set IP src/dst */
+ {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for IPv6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+ {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+ {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for sctp4 with input set IP src/dst - only support
+ * RSS on SCTPv4 on outer headers (non-tunneled)
+ */
+ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+ {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+ {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for sctp6 with input set IPv6 src/dst - only support
+ * RSS on SCTPv6 on outer headers (non-tunneled)
+ */
+ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
+ {ICE_FLOW_SEG_HDR_ESP,
+ ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
+};
+
/**
* ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
* @vsi: VSI to be configured
@@ -1619,11 +1647,12 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
*/
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
- u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
+ u16 vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct device *dev;
int status;
+ u32 i;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1631,67 +1660,15 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
vsi_num);
return;
}
- /* configure RSS for IPv4 with input set IP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for IPv6 with input set IPv6 src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
- ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
- ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
- ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for sctp4 with input set IP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
+ const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
- /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
- ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
- ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for sctp6 with input set IPv6 src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
- ICE_FLOW_SEG_HDR_ESP);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ status = ice_add_rss_cfg(hw, vsi, cfg);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
+ cfg->addl_hdrs, cfg->hash_flds,
+ cfg->hdr_type, cfg->symm);
+ }
}
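
The rewrite above folds eight near-identical ice_add_rss_cfg() call sites into a const table walked by a single loop. The general table-driven shape, sketched here with hypothetical types and an injected apply callback:

#include <linux/kernel.h>
#include <linux/printk.h>

struct demo_rss_cfg {
	u32 addl_hdrs;
	u64 hash_flds;
};

static const struct demo_rss_cfg demo_default_cfgs[] = {
	{ .addl_hdrs = 0x1, .hash_flds = 0x3 },
	{ .addl_hdrs = 0x2, .hash_flds = 0xc },
};

static void demo_apply_default_cfgs(int (*apply)(const struct demo_rss_cfg *))
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(demo_default_cfgs); i++) {
		int err = apply(&demo_default_cfgs[i]);

		/* Mirror the driver: log and keep going on failure. */
		if (err)
			pr_debug("RSS default cfg %u failed: %d\n", i, err);
	}
}
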
/**
@@ -1808,11 +1785,8 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
QRXFLXP_CNTXT_RXDID_PRIO_M |
QRXFLXP_CNTXT_TS_M);
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
+ regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid);
+ regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);
if (ena_ts)
/* Enable TimeSync on this queue */
@@ -2450,6 +2424,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi);
+
vsi->stat_offsets_loaded = false;
if (ice_is_xdp_ena_vsi(vsi)) {
@@ -2926,6 +2904,123 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
+ * __ice_queue_set_napi - Set the napi instance for the queue
+ * @dev: device to which NAPI and queue belong
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ * @locked: is the rtnl_lock already held
+ *
+ * Set the napi instance for the queue. Caller indicates the lock status.
+ */
+static void
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
+{
+ if (!locked)
+ rtnl_lock();
+ netif_queue_set_napi(dev, queue_index, type, napi);
+ if (!locked)
+ rtnl_unlock();
+}
+
+/**
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (!vsi->netdev)
+ return;
+
+ if (current_work() == &pf->serv_task ||
+ test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+ test_bit(ICE_DOWN, pf->state) ||
+ test_bit(ICE_SUSPENDED, pf->state))
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ false);
+ else
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ true);
+}
+
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector
+ */
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_vsi_set_napi_queues - Associate netdev queues with NAPI
+ * @vsi: VSI pointer
+ *
+ * Associate queue[s] with napi for all vectors
+ */
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
+}
+
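
The new helpers above register each ring's queue index against its NAPI instance through netif_queue_set_napi(), which must run with the rtnl lock held unless the caller already owns it. A stripped-down sketch of the core call (the combined Rx/Tx pairing here is a hypothetical simplification):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Publish the queue <-> NAPI mapping for one combined queue so userspace
 * reading the netdev genetlink queue API can see it.
 */
static void demo_publish_queue_napi(struct net_device *dev, unsigned int qid,
				    struct napi_struct *napi)
{
	rtnl_lock();
	netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, napi);
	netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_TX, napi);
	rtnl_unlock();
}
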
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3070,27 +3165,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
/**
- * ice_vsi_realloc_stat_arrays - Frees unused stat structures
+ * ice_vsi_realloc_stat_arrays - Frees unused stat structures or allocates new ones
* @vsi: VSI pointer
- * @prev_txq: Number of Tx rings before ring reallocation
- * @prev_rxq: Number of Rx rings before ring reallocation
*/
-static void
-ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+static int
+ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
{
+ u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
+ u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
struct ice_vsi_stats *vsi_stat;
struct ice_pf *pf = vsi->back;
+ u16 prev_txq = vsi->alloc_txq;
+ u16 prev_rxq = vsi->alloc_rxq;
int i;
- if (!prev_txq || !prev_rxq)
- return;
- if (vsi->type == ICE_VSI_CHNL)
- return;
-
vsi_stat = pf->vsi_stats[vsi->idx];
- if (vsi->num_txq < prev_txq) {
- for (i = vsi->num_txq; i < prev_txq; i++) {
+ if (req_txq < prev_txq) {
+ for (i = req_txq; i < prev_txq; i++) {
if (vsi_stat->tx_ring_stats[i]) {
kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
@@ -3098,14 +3192,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
}
}
- if (vsi->num_rxq < prev_rxq) {
- for (i = vsi->num_rxq; i < prev_rxq; i++) {
+ tx_ring_stats = vsi_stat->tx_ring_stats;
+ vsi_stat->tx_ring_stats =
+ krealloc_array(vsi_stat->tx_ring_stats, req_txq,
+ sizeof(*vsi_stat->tx_ring_stats),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vsi_stat->tx_ring_stats) {
+ vsi_stat->tx_ring_stats = tx_ring_stats;
+ return -ENOMEM;
+ }
+
+ if (req_rxq < prev_rxq) {
+ for (i = req_rxq; i < prev_rxq; i++) {
if (vsi_stat->rx_ring_stats[i]) {
kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
}
}
}
+
+ rx_ring_stats = vsi_stat->rx_ring_stats;
+ vsi_stat->rx_ring_stats =
+ krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
+ sizeof(*vsi_stat->rx_ring_stats),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vsi_stat->rx_ring_stats) {
+ vsi_stat->rx_ring_stats = rx_ring_stats;
+ return -ENOMEM;
+ }
+
+ return 0;
}
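
The stat rework above resizes the per-ring stats pointer arrays with krealloc_array(), leaving the caller's pointer untouched on failure so the existing array is not lost. That idiom in isolation, with a generic element type; the GFP_KERNEL | __GFP_ZERO flags simply mirror the patch:

#include <linux/slab.h>

/* Resize an array of pointers in place; on allocation failure the
 * caller's pointer still references the original, untouched array.
 */
static int demo_resize_ptr_array(void ***array, size_t new_count)
{
	void **resized;

	resized = krealloc_array(*array, new_count, sizeof(*resized),
				 GFP_KERNEL | __GFP_ZERO);
	if (!resized)
		return -ENOMEM;

	*array = resized;
	return 0;
}
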
/**
@@ -3122,9 +3238,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int ret, prev_txq, prev_rxq;
int prev_num_q_vectors = 0;
struct ice_pf *pf;
+ int ret;
if (!vsi)
return -EINVAL;
@@ -3143,8 +3259,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
- prev_txq = vsi->num_txq;
- prev_rxq = vsi->num_rxq;
+ ret = ice_vsi_realloc_stat_arrays(vsi);
+ if (ret)
+ goto err_vsi_cfg;
ice_vsi_decfg(vsi);
ret = ice_vsi_cfg_def(vsi, &params);
@@ -3162,8 +3279,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
- ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
-
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
@@ -3315,9 +3430,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
pow = order_base_2(tc0_qcount);
- qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index f24f5d1e6f9c..bfcfc582a4c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -91,6 +91,16 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index adfdea1e2805..df6a68ab747e 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,6 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
+#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
* ice driver.
@@ -979,7 +980,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
* Octets 13 - 20 are TSA values - leave as zeros
*/
buf[5] = 0x64;
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += len + 2;
tlv = (struct ice_lldp_org_tlv *)
((char *)tlv + sizeof(tlv->typelen) + len);
@@ -1013,7 +1014,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
/* Octet 1 left as all zeros - PFC disabled */
buf[0] = 0x08;
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += len + 2;
if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
@@ -1252,6 +1253,32 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
+ * ice_get_fwlog_data - copy the FW log data from ARQ event
+ * @pf: PF that the FW log event is associated with
+ * @event: event structure containing FW log data
+ */
+static void
+ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ struct ice_fwlog_data *fwlog;
+ struct ice_hw *hw = &pf->hw;
+
+ fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
+
+ memset(fwlog->data, 0, PAGE_SIZE);
+ fwlog->data_size = le16_to_cpu(event->desc.datalen);
+
+ memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
+ ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
+
+ if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
+ /* the rings are full so bump the head to create room */
+ ice_fwlog_ring_increment(&hw->fwlog_ring.head,
+ hw->fwlog_ring.size);
+ }
+}
+
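
ice_get_fwlog_data() above copies each firmware-log ARQ event into a ring of buffers: it writes at tail, advances tail, and when the ring is full also advances head so the oldest entry is dropped. A toy sketch of the index arithmetic; these helpers are hypothetical, not the driver's ice_fwlog_ring_* functions:

/* Advance an index around a ring of @size slots. */
static void demo_ring_increment(u16 *idx, u16 size)
{
	*idx = (*idx + 1) % size;
}

/* The ring counts as full when advancing tail would collide with head;
 * one slot stays unused so full and empty remain distinguishable.
 */
static bool demo_ring_full(u16 head, u16 tail, u16 size)
{
	return ((tail + 1) % size) == head;
}
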
+/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1532,8 +1559,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vc_process_vf_msg(pf, &event, &data);
break;
- case ice_aqc_opc_fw_logging:
- ice_output_fw_log(hw, &event.desc, event.msg_buf);
+ case ice_aqc_opc_fw_logs_event:
+ ice_get_fwlog_data(pf, &event);
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
@@ -1744,14 +1771,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
/* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) {
- u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
- GL_MDET_TX_PQM_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
- GL_MDET_TX_PQM_VF_NUM_S;
- u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
- GL_MDET_TX_PQM_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
- GL_MDET_TX_PQM_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
@@ -1761,14 +1784,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
if (reg & GL_MDET_TX_TCLAN_VALID_M) {
- u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
- GL_MDET_TX_TCLAN_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
- GL_MDET_TX_TCLAN_VF_NUM_S;
- u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
- GL_MDET_TX_TCLAN_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
- GL_MDET_TX_TCLAN_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
@@ -1778,14 +1797,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, GL_MDET_RX);
if (reg & GL_MDET_RX_VALID_M) {
- u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
- GL_MDET_RX_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
- GL_MDET_RX_VF_NUM_S;
- u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
- GL_MDET_RX_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
- GL_MDET_RX_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
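
All three MDD blocks above now decode the event register with FIELD_GET(), which masks and shifts in one step. The same pattern on a made-up status word:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>

#define DEMO_EVT_QNUM_M		GENMASK(13, 0)	/* hypothetical queue number */
#define DEMO_EVT_TYPE_M		GENMASK(28, 26)	/* hypothetical event type */

static void demo_decode_event(u32 reg)
{
	u16 queue = FIELD_GET(DEMO_EVT_QNUM_M, reg);
	u8 type = FIELD_GET(DEMO_EVT_TYPE_M, reg);

	/* Replaces "(reg & MASK) >> SHIFT" and the separate _S constant. */
	pr_debug("event %u on queue %u\n", type, queue);
}
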
@@ -3031,6 +3046,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
static void ice_ena_misc_vector(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ u32 pf_intr_start_offset;
u32 val;
/* Disable anti-spoof detection interrupt to prevent spurious event
@@ -3059,6 +3075,47 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
/* SW_ITR_IDX = 0, but don't change INTENA */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
+
+ if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ return;
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
+ GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
+}
+
+/**
+ * ice_ll_ts_intr - ll_ts interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
+{
+ struct ice_pf *pf = data;
+ u32 pf_intr_start_offset;
+ struct ice_ptp_tx *tx;
+ unsigned long flags;
+ struct ice_hw *hw;
+ u32 val;
+ u8 idx;
+
+ hw = &pf->hw;
+ tx = &pf->ptp.port.tx;
+ spin_lock_irqsave(&tx->lock, flags);
+ ice_ptp_complete_tx_single_tstamp(tx);
+
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
+ val);
+
+ return IRQ_HANDLED;
}
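
ice_ll_ts_intr() above uses find_next_bit_wrap() to pick the next in-flight timestamp slot, resuming the scan just past the last index handed to firmware and wrapping to the start of the bitmap; a result equal to the bitmap length means nothing is pending. In isolation:

#include <linux/bitops.h>

/* Return the next set bit after @last, wrapping around; returns @len when
 * the bitmap has no bits set (the same convention as the kernel helper).
 */
static unsigned long demo_next_pending(const unsigned long *in_use,
				       unsigned long len, unsigned long last)
{
	return find_next_bit_wrap(in_use, len, last + 1);
}
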
/**
@@ -3069,6 +3126,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
struct ice_pf *pf = (struct ice_pf *)data;
+ irqreturn_t ret = IRQ_HANDLED;
struct ice_hw *hw = &pf->hw;
struct device *dev;
u32 oicr, ena_mask;
@@ -3108,8 +3166,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* we have a reset warning */
ena_mask &= ~PFINT_OICR_GRST_M;
- reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
- GLGEN_RSTAT_RESET_TYPE_S;
+ reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
+ rd32(hw, GLGEN_RSTAT));
if (reset == ICE_RESET_CORER)
pf->corer_count++;
@@ -3150,8 +3208,22 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
+ if (ice_pf_state_is_nominal(pf) &&
+ pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ unsigned long flags;
+ u8 idx;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock_irqrestore(&tx->lock, flags);
+ } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ ret = IRQ_WAKE_THREAD;
+ }
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3167,7 +3239,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
GLTSYN_STAT_EVENT1_M |
GLTSYN_STAT_EVENT2_M);
- set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
+ ice_ptp_extts_event(pf);
}
}
@@ -3190,8 +3262,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(ICE_PFR_REQ, pf->state);
}
}
+ ice_service_task_schedule(pf);
+ if (ret == IRQ_HANDLED)
+ ice_irq_dynamic_ena(hw, NULL, NULL);
- return IRQ_WAKE_THREAD;
+ return ret;
}
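
After this change the hard handler decides per interrupt whether the threaded half is needed: it returns IRQ_WAKE_THREAD only when Tx-timestamp work was deferred, and otherwise re-enables the interrupt itself and returns IRQ_HANDLED. The general shape of that pattern, sketched with a hypothetical device:

#include <linux/interrupt.h>

struct demo_dev {
	bool deferred_work;	/* set while acking causes (not shown) */
};

static irqreturn_t demo_hard_handler(int irq, void *data)
{
	struct demo_dev *dd = data;

	/* Wake the thread only when there is sleepable work queued;
	 * otherwise the interrupt is fully handled right here.
	 */
	return dd->deferred_work ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t demo_thread_fn(int irq, void *data)
{
	/* heavy, sleepable processing happens here */
	return IRQ_HANDLED;
}

/* Both halves are registered together, e.g.:
 *	devm_request_threaded_irq(dev, virq, demo_hard_handler,
 *				  demo_thread_fn, 0, "demo", dd);
 */
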
/**
@@ -3207,12 +3282,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
hw = &pf->hw;
if (ice_is_reset_in_progress(pf->state))
- return IRQ_HANDLED;
-
- ice_service_task_schedule(pf);
-
- if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
- ice_ptp_extts_event(pf);
+ goto skip_irq;
if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
/* Process outstanding Tx timestamps. If there is more work,
@@ -3224,6 +3294,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
}
}
+skip_irq:
ice_irq_dynamic_ena(hw, NULL, NULL);
return IRQ_HANDLED;
@@ -3254,6 +3325,20 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
}
/**
+ * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
+ * @pf: board private structure
+ */
+static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
+{
+ int irq_num = pf->ll_ts_irq.virq;
+
+ synchronize_irq(irq_num);
+ devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
+
+ ice_free_irq(pf, pf->ll_ts_irq);
+}
+
+/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure
*/
@@ -3272,6 +3357,8 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
ice_free_irq(pf, pf->oicr_irq);
+ if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ ice_free_irq_msix_ll_ts(pf);
}
/**
@@ -3297,10 +3384,12 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
- /* This enables Sideband queue Interrupt causes */
- val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
- PFINT_SB_CTL_CAUSE_ENA_M);
- wr32(hw, PFINT_SB_CTL, val);
+ if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ /* enable Sideband queue Interrupt causes */
+ val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
+ PFINT_SB_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_SB_CTL, val);
+ }
ice_flush(hw);
}
@@ -3317,13 +3406,17 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- struct msi_map oicr_irq;
+ u32 pf_intr_start_offset;
+ struct msi_map irq;
int err = 0;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
dev_driver_string(dev), dev_name(dev));
+ if (!pf->int_name_ll_ts[0])
+ snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
+ "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
/* Do not request IRQ but do enable OICR interrupt since settings are
* lost during reset. Note that this function is called only during
* rebuild path and not while reset is in progress.
@@ -3332,11 +3425,11 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
goto skip_req_irq;
/* reserve one vector in irq_tracker for misc interrupts */
- oicr_irq = ice_alloc_irq(pf, false);
- if (oicr_irq.index < 0)
- return oicr_irq.index;
+ irq = ice_alloc_irq(pf, false);
+ if (irq.index < 0)
+ return irq.index;
- pf->oicr_irq = oicr_irq;
+ pf->oicr_irq = irq;
err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
ice_misc_intr_thread_fn, 0,
pf->int_name, pf);
@@ -3347,10 +3440,34 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
return err;
}
+ /* reserve one vector in irq_tracker for ll_ts interrupt */
+ if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ goto skip_req_irq;
+
+ irq = ice_alloc_irq(pf, false);
+ if (irq.index < 0)
+ return irq.index;
+
+ pf->ll_ts_irq = irq;
+ err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
+ pf->int_name_ll_ts, pf);
+ if (err) {
+ dev_err(dev, "devm_request_irq for %s failed: %d\n",
+ pf->int_name_ll_ts, err);
+ ice_free_irq(pf, pf->ll_ts_irq);
+ return err;
+ }
+
skip_req_irq:
ice_ena_misc_vector(pf);
ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
+ /* This enables LL TS interrupt */
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ wr32(hw, PFINT_SB_CTL,
+ ((pf->ll_ts_irq.index + pf_intr_start_offset) &
+ PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
@@ -3375,9 +3492,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, v_idx)
+ ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
+ __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ }
}
/**
@@ -3397,6 +3516,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
netdev->netdev_ops = &ice_netdev_ops;
netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
+ netdev->xdp_metadata_ops = &ice_xdp_md_ops;
ice_set_ethtool_ops(netdev);
if (vsi->type != ICE_VSI_PF)
@@ -4361,6 +4481,19 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
+ * ice_pf_fwlog_update_module - update 1 module
+ * @pf: pointer to the PF struct
+ * @log_level: log_level to use for the @module
+ * @module: module to update
+ */
+void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ hw->fwlog_cfg.module_entries[module].log_level = log_level;
+}
+
+/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -4685,6 +4818,8 @@ static void ice_init_features(struct ice_pf *pf)
if (ice_init_lag(pf))
dev_warn(dev, "Failed to init link aggregation support\n");
+
+ ice_hwmon_init(pf);
}
static void ice_deinit_features(struct ice_pf *pf)
@@ -4702,6 +4837,8 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_ptp_release(pf);
if (test_bit(ICE_FLAG_DPLL, pf->flags))
ice_dpll_deinit(pf);
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ xa_destroy(&pf->eswitch.reprs);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -5203,11 +5340,15 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ ice_debugfs_exit();
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
+ ice_hwmon_exit(pf);
+
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
set_bit(ICE_DOWN, pf->state);
@@ -5306,6 +5447,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ ice_vsi_set_napi_queues(pf->vsi[v]);
}
ret = ice_req_irq_msix_misc(pf);
@@ -5672,6 +5814,8 @@ static int __init ice_module_init(void)
goto err_dest_wq;
}
+ ice_debugfs_init();
+
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register PCI driver, err %d\n", status);
@@ -5682,6 +5826,7 @@ static int __init ice_module_init(void)
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
+ ice_debugfs_exit();
err_dest_wq:
destroy_workqueue(ice_wq);
return status;
@@ -6041,6 +6186,23 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
}
/**
+ * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
+ * @vsi: PF's VSI
+ * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
+ *
+ * Store current stripped VLAN proto in ring packet context,
+ * so it can be accessed more efficiently by packet processing code.
+ */
+static void
+ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
+{
+ u16 i;
+
+ ice_for_each_alloc_rxq(vsi, i)
+ vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
+}
+
+/**
* ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
* @vsi: PF's VSI
* @features: features used to determine VLAN offload settings
@@ -6082,6 +6244,9 @@ ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
if (strip_err || insert_err)
return -EIO;
+ ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
+ htons(vlan_ethertype) : 0);
+
return 0;
}
@@ -6670,13 +6835,11 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes +
- pf->stats.rx_len_errors +
pf->stats.rx_undersize +
pf->hw_csum_rx_error +
pf->stats.rx_jabber +
pf->stats.rx_fragments +
pf->stats.rx_oversize;
- cur_ns->rx_length_errors = pf->stats.rx_len_errors;
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
}
@@ -6816,9 +6979,6 @@ void ice_update_pf_stats(struct ice_pf *pf)
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
- ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
- &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
-
ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
@@ -7401,9 +7561,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
+ err = ice_eswitch_rebuild(pf);
if (err) {
- dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
+ dev_err(dev, "Switchdev rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
@@ -7704,6 +7864,59 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
}
/**
+ * ice_set_rss_hfunc - Set RSS HASH function
+ * @vsi: Pointer to VSI structure
+ * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctx;
+ bool symm;
+ int err;
+
+ if (hfunc == vsi->rss_hfunc)
+ return 0;
+
+ if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
+ hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
+ return -EOPNOTSUPP;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+ ctx->info.q_opt_rss = vsi->info.q_opt_rss;
+ ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
+ ctx->info.q_opt_rss |=
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
+ vsi->vsi_num, err);
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ vsi->rss_hfunc = hfunc;
+ netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
+ hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
+ "Symmetric " : "");
+ }
+ kfree(ctx);
+ if (err)
+ return err;
+
+ /* Fix the symmetry setting for all existing RSS configurations */
+ symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
+ return ice_set_rss_cfg_symm(hw, vsi, symm);
+}
+
+/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
* @pid: process ID
@@ -7800,6 +8013,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
pf_sw = pf->first_sw;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
@@ -7889,8 +8104,8 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct ice_hw *hw = &pf->hw;
u32 head, val = 0;
- head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
- QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
+ head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
+ rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
@@ -8138,13 +8353,12 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
enum ice_flow_priority prio;
- u64 prof_id;
/* add this VSI to FDir profile for this flow */
prio = ICE_FLOW_PRIO_NORMAL;
prof = hw->fdir_prof[flow];
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ status = ice_flow_add_entry(hw, ICE_BLK_FD,
+ prof->prof_id[tun],
prof->vsi_h[0], vsi->idx,
prio, prof->fdir_seg[tun],
&entry_h);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index f6f52a248066..d4e05d2cb30c 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -571,8 +571,8 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
return status;
}
- nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
- nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
+ nvm->major = FIELD_GET(ICE_NVM_VER_HI_MASK, ver);
+ nvm->minor = FIELD_GET(ICE_NVM_VER_LO_MASK, ver);
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
if (status) {
@@ -706,9 +706,9 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
combo_ver = le32_to_cpu(civd.combo_ver);
- orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT);
- orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
- orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT);
+ orom->major = FIELD_GET(ICE_OROM_VER_MASK, combo_ver);
+ orom->patch = FIELD_GET(ICE_OROM_VER_PATCH_MASK, combo_ver);
+ orom->build = FIELD_GET(ICE_OROM_VER_BUILD_MASK, combo_ver);
return 0;
}
@@ -950,7 +950,8 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
}
/* Check that the control word indicates validity */
- if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
+ if (FIELD_GET(ICE_SR_CTRL_WORD_1_M, ctrl_word) !=
+ ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
return -EIO;
}
@@ -1027,7 +1028,7 @@ int ice_init_nvm(struct ice_hw *hw)
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);
- sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
/* Switching to words (sr_size contains power of 2) */
flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index 82bc54fec7f3..a2562f04267f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -24,7 +24,7 @@
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define ice_flush(a) rd32((a), GLGEN_STAT)
-#define ICE_M(m, s) ((m) << (s))
+#define ICE_M(m, s) ((m ## U) << (s))
struct ice_dma_mem {
void *va;
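
The ICE_M() change pastes a U suffix onto the mask argument so the shift happens in unsigned arithmetic; shifting a plain int literal such as 0x8000 up to bit 31 would otherwise be signed overflow. Token pasting only works because callers pass integer literals rather than expressions. A small sketch of the idea:

/* Before: (0x8000 << 16) overflows a signed int (undefined behaviour).
 * After:  the pasted U suffix makes the literal unsigned before shifting.
 */
#define DEMO_M(m, s)	((m ## U) << (s))

#define DEMO_SIGN_BIT	DEMO_M(0x8000, 16)	/* 0x80000000, well defined */
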
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 71f405f8a6fe..3b6605c8585e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -7,7 +7,7 @@
#define E810_OUT_PROP_DELAY_NS 1
-#define UNKNOWN_INCVAL_E822 0x100000000ULL
+#define UNKNOWN_INCVAL_E82X 0x100000000ULL
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
@@ -525,6 +525,119 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
+ * @tx: the PTP Tx timestamp tracker
+ * @idx: index of the timestamp to request
+ */
+void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
+{
+ struct ice_ptp_port *ptp_port;
+ struct sk_buff *skb;
+ struct ice_pf *pf;
+
+ if (!tx->init)
+ return;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ /* Drop packets which have waited for more than 2 seconds */
+ if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
+ /* Count the number of Tx timestamps that timed out */
+ pf->ptp.tx_hwtstamp_timeouts++;
+
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
+ /* Write TS index to read to the PF register so the FW can read it */
+ wr32(&pf->hw, PF_SB_ATQBAL,
+ TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
+ TS_LL_READ_TS);
+ tx->last_ll_ts_idx_read = idx;
+}
+
+/**
+ * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
+ * @tx: the PTP Tx timestamp tracker
+ */
+void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
+{
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_ptp_port *ptp_port;
+ u64 raw_tstamp, tstamp;
+ bool drop_ts = false;
+ struct sk_buff *skb;
+ struct ice_pf *pf;
+ u32 val;
+
+ if (!tx->init || tx->last_ll_ts_idx_read < 0)
+ return;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
+ val = rd32(&pf->hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (val & TS_LL_READ_TS) {
+ dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready\n");
+ return;
+ }
+
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ raw_tstamp <<= 32;
+
+ /* Read the low 32 bit value */
+ raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+
+ /* For PHYs which don't implement a proper timestamp ready bitmap,
+ * verify that the timestamp value is different from the last cached
+ * timestamp. If it is not, skip this for now assuming it hasn't yet
+ * been captured by hardware.
+ */
+ if (!drop_ts && tx->verify_cached &&
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ return;
+
+ if (tx->verify_cached && raw_tstamp)
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ if (test_and_clear_bit(idx, tx->stale))
+ drop_ts = true;
+
+ if (!skb)
+ return;
+
+ if (drop_ts) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ if (tstamp) {
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+ }
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
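
In the low-latency completion path above, the 40-bit raw timestamp comes back split across two registers: bits 39:32 in a byte field of one word (pulled out with TS_LL_READ_TS_HIGH) and bits 31:0 in the other. A sketch of the reassembly, using a hypothetical mask in place of the real register layout:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_TS_HIGH_M	GENMASK(23, 16)	/* hypothetical: TS[39:32] at bits 23:16 */

static u64 demo_assemble_40bit_ts(u32 hi_word, u32 lo_word)
{
	u64 ts = FIELD_GET(DEMO_TS_HIGH_M, hi_word);

	ts <<= 32;	/* position the high byte at bits 39:32 */
	ts |= lo_word;	/* low 32 bits of the raw timestamp */
	return ts;
}
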
+/**
* ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
* @tx: the PTP Tx timestamp tracker
*
@@ -575,6 +688,7 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
@@ -646,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
drop_ts = true;
skip_ts_read:
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
if (tx->verify_cached && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
@@ -654,7 +768,7 @@ skip_ts_read:
tx->tstamps[idx].skb = NULL;
if (test_and_clear_bit(idx, tx->stale))
drop_ts = true;
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* It is unlikely but possible that the SKB will have been
* flushed at this point due to link change or teardown.
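
The spin_lock() to spin_lock_irqsave() conversions throughout this file exist because the Tx tracker lock is now also taken from hard-IRQ context by the handlers added in this series; without masking interrupts, the handler could fire on the same CPU while the lock is held and deadlock. A minimal sketch of the rule:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Process/softirq context: local interrupts must be disabled while
 * holding a lock that a hard-IRQ handler can also take.
 */
static void demo_process_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... update shared timestamp state ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Hard-IRQ context: interrupts are already disabled here, so plain
 * spin_lock() suffices; irqsave also works.
 */
static irqreturn_t demo_irq_path(int irq, void *data)
{
	spin_lock(&demo_lock);
	/* ... update shared timestamp state ... */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}
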
@@ -705,7 +819,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
/* Read the Tx ready status first */
err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
- if (err || tstamp_ready)
+ if (err)
+ break;
+ else if (tstamp_ready)
return ICE_TX_TSTAMP_WORK_PENDING;
}
@@ -722,6 +838,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
bool more_timestamps;
+ unsigned long flags;
if (!tx->init)
return ICE_TX_TSTAMP_WORK_DONE;
@@ -730,9 +847,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
ice_ptp_process_tx_tstamp(tx);
/* Check if there are outstanding Tx timestamps */
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
if (more_timestamps)
return ICE_TX_TSTAMP_WORK_PENDING;
@@ -769,6 +886,7 @@ ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
tx->in_use = in_use;
tx->stale = stale;
tx->init = 1;
+ tx->last_ll_ts_idx_read = -1;
spin_lock_init(&tx->lock);
@@ -786,6 +904,7 @@ static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
struct ice_hw *hw = &pf->hw;
+ unsigned long flags;
u64 tstamp_ready;
int err;
u8 idx;
@@ -809,12 +928,12 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
ice_clear_phy_tstamp(hw, tx->block, phy_idx);
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
clear_bit(idx, tx->in_use);
clear_bit(idx, tx->stale);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* Count the number of Tx timestamps flushed */
pf->ptp.tx_hwtstamp_flushed++;
@@ -838,9 +957,11 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
- spin_lock(&tx->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
}
/**
@@ -853,9 +974,11 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
- spin_lock(&tx->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
tx->init = 0;
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* wait for potentially outstanding interrupt to complete */
synchronize_irq(pf->oicr_irq.virq);
@@ -875,7 +998,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
* @port: the port this structure tracks
@@ -886,11 +1009,11 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
* registers into chunks based on the port number.
*/
static int
-ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
tx->block = port / ICE_PORTS_PER_QUAD;
- tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
- tx->len = INDEX_PER_PORT_E822;
+ tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
+ tx->len = INDEX_PER_PORT_E82X;
tx->verify_cached = 0;
return ice_ptp_alloc_tx_tracker(tx);
@@ -1093,10 +1216,10 @@ static u64 ice_base_incval(struct ice_pf *pf)
if (ice_is_e810(hw))
incval = ICE_PTP_NOMINAL_INCVAL_E810;
- else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
- incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
+ incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
else
- incval = UNKNOWN_INCVAL_E822;
+ incval = UNKNOWN_INCVAL_E82X;
dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
incval);
@@ -1125,10 +1248,10 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
/* need to read FIFO state */
if (offs == 0 || offs == 1)
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
&val);
else
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
&val);
if (err) {
@@ -1138,9 +1261,9 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
}
if (offs & 0x1)
- phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
+ phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
else
- phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;
+ phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
if (phy_sts & FIFO_EMPTY) {
port->tx_fifo_busy_cnt = FIFO_OK;
@@ -1156,7 +1279,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
dev_dbg(ice_pf_to_dev(pf),
"Port %d Tx FIFO still not empty; resetting quad %d\n",
port->port_num, quad);
- ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
port->tx_fifo_busy_cnt = FIFO_OK;
return 0;
}
@@ -1201,8 +1324,8 @@ static void ice_ptp_wait_for_offsets(struct kthread_work *work)
tx_err = ice_ptp_check_tx_fifo(port);
if (!tx_err)
- tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num);
- rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num);
+ tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
+ rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
if (tx_err || rx_err) {
/* Tx and/or Rx offset not yet configured, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
@@ -1231,7 +1354,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- err = ice_stop_phy_timer_e822(hw, port, true);
+ err = ice_stop_phy_timer_e82x(hw, port, true);
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
@@ -1255,6 +1378,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
struct ice_pf *pf = ptp_port_to_pf(ptp_port);
u8 port = ptp_port->port_num;
struct ice_hw *hw = &pf->hw;
+ unsigned long flags;
int err;
if (ice_is_e810(hw))
@@ -1268,20 +1392,20 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
/* temporarily disable Tx timestamps while calibrating PHY offset */
- spin_lock(&ptp_port->tx.lock);
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
ptp_port->tx.calibrating = true;
- spin_unlock(&ptp_port->tx.lock);
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
ptp_port->tx_fifo_busy_cnt = 0;
/* Start the PHY timer in Vernier mode */
- err = ice_start_phy_timer_e822(hw, port);
+ err = ice_start_phy_timer_e82x(hw, port);
if (err)
goto out_unlock;
/* Enable Tx timestamps right away */
- spin_lock(&ptp_port->tx.lock);
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
ptp_port->tx.calibrating = false;
- spin_unlock(&ptp_port->tx.lock);
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
@@ -1323,7 +1447,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
return;
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1349,7 +1473,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
&val);
if (err)
break;
@@ -1357,13 +1481,13 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
if (ena) {
val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
- val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
- Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
+ val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
+ threshold);
} else {
val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
}
- err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
val);
if (err)
break;
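As a side note on the FIELD_GET/FIELD_PREP conversions in these hunks: the macros derive the shift from the mask instead of open-coding shift-and-mask pairs. A minimal standalone sketch is below; the EXAMPLE_MASK value and the EX_FIELD_* helpers are stand-ins for illustration, not the kernel's <linux/bitfield.h> implementation.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MASK	0x00003f00u		/* hypothetical field mask */
#define MASK_SHIFT(m)	(__builtin_ctz(m))	/* GCC/Clang builtin: shift implied by mask */
#define EX_FIELD_PREP(m, v)	(((uint32_t)(v) << MASK_SHIFT(m)) & (m))
#define EX_FIELD_GET(m, r)	(((uint32_t)(r) & (m)) >> MASK_SHIFT(m))

int main(void)
{
        uint32_t threshold = 0x2a, reg;

        /* old style: explicit shift, then mask */
        reg = (threshold << 8) & EXAMPLE_MASK;
        /* new style: the shift is derived from the mask itself */
        printf("old=0x%08x new=0x%08x field=%u\n",
               reg, EX_FIELD_PREP(EXAMPLE_MASK, threshold),
               EX_FIELD_GET(EXAMPLE_MASK, reg));
        return 0;
}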
@@ -1503,8 +1627,7 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
* + num_in_channels * tmr_idx
*/
func = 1 + chan + (tmr_idx * 3);
- gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
- GLGEN_GPIO_CTL_PIN_FUNC_M);
+ gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
pf->ptp.ext_ts_chan |= (1 << chan);
} else {
/* clear the values we set to reset defaults */
@@ -1601,7 +1724,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
if (ice_is_e810(hw))
start_time -= E810_OUT_PROP_DELAY_NS;
else
- start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));
+ start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
@@ -1614,7 +1737,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
/* 4. write GPIO CTL reg */
func = 8 + chan + (tmr_idx * 4);
val = GLGEN_GPIO_CTL_PIN_DIR_M |
- ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
+ FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
/* Store the value if requested */
@@ -1840,7 +1963,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_clkout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->phy_model == ICE_PHY_E822)
+ if (hw->phy_model == ICE_PHY_E82X)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2127,30 +2250,26 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
}
/**
- * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
- * @rx_ring: Ring to get the VSI info
+ * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
* @rx_desc: Receive descriptor
- * @skb: Particular skb to send timestamp with
+ * @pkt_ctx: Packet context to get the cached time
*
* The driver receives a notification in the receive descriptor with timestamp.
- * The timestamp is in ns, so we must convert the result first.
*/
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx)
{
- struct skb_shared_hwtstamps *hwtstamps;
u64 ts_ns, cached_time;
u32 ts_high;
if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
- return;
+ return 0;
- cached_time = READ_ONCE(rx_ring->cached_phctime);
+ cached_time = READ_ONCE(pkt_ctx->cached_phctime);
/* Do not report a timestamp if we don't have a cached PHC time */
if (!cached_time)
- return;
+ return 0;
/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
* PHC value, rather than accessing the PF. This also allows us to
@@ -2161,9 +2280,7 @@ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
+ return ts_ns;
}
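For context, a standalone sketch of the idea ice_ptp_get_rx_hwts() relies on via ice_ptp_extend_32b_ts(): the 32-bit descriptor timestamp is extended against a cached 64-bit PHC value. This is an illustration only, not the driver's exact code; the half-range wraparound heuristic is an assumption of the sketch.

#include <stdint.h>
#include <stdio.h>

static uint64_t extend_32b_ts(uint64_t cached_phc, uint32_t ts_low)
{
        uint32_t phc_low = (uint32_t)cached_phc;
        uint32_t delta = ts_low - phc_low;

        /* A huge apparent delta means the timestamp was likely captured just
         * before the cached PHC value, not ~2 s (half the 32-bit ns range)
         * after it, so subtract instead of add.
         */
        if (delta > (UINT32_MAX / 2))
                return cached_phc - (uint32_t)(phc_low - ts_low);
        return cached_phc + delta;
}

int main(void)
{
        uint64_t phc = 0x0000000123400000ULL;

        printf("%llx\n", (unsigned long long)extend_32b_ts(phc, 0x23400100));
        printf("%llx\n", (unsigned long long)extend_32b_ts(phc, 0x233ffff0));
        return 0;
}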
/**
@@ -2378,18 +2495,23 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
*/
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
+ unsigned long flags;
u8 idx;
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
/* Check that this tracker is accepting new timestamp requests */
if (!ice_ptp_is_tx_tracker_up(tx)) {
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
return -1;
}
/* Find and set the first available index */
- idx = find_first_zero_bit(tx->in_use, tx->len);
+ idx = find_next_zero_bit(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx == tx->len)
+ idx = find_first_zero_bit(tx->in_use, tx->len);
+
if (idx < tx->len) {
/* We got a valid index that no other thread could have set. Store
* a reference to the skb and the start time to allow discarding old
@@ -2403,7 +2525,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
ice_trace(tx_tstamp_request, skb, idx);
}
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* return the appropriate PHY timestamp register index, -1 if no
* indexes were available.
@@ -2440,6 +2562,54 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
}
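A quick standalone sketch of the wrap-around free-slot search added to ice_ptp_request_ts() above (start after the last low-latency index read, wrap to the start if nothing is free past it). The 16-entry bitmap and next_zero_bit() helper are illustrative stand-ins for the kernel's find_next_zero_bit()/find_first_zero_bit().

#include <stdint.h>
#include <stdio.h>

static unsigned int next_zero_bit(uint64_t map, unsigned int len, unsigned int start)
{
        unsigned int i;

        for (i = start; i < len; i++)
                if (!(map & (1ULL << i)))
                        return i;
        return len;	/* no free bit found */
}

int main(void)
{
        uint64_t in_use = 0x0f0fULL;	/* slots 0-3 and 8-11 busy */
        unsigned int len = 16, last_read = 9, idx;

        idx = next_zero_bit(in_use, len, last_read + 1);
        if (idx == len)
                idx = next_zero_bit(in_use, len, 0);
        printf("picked slot %u\n", idx);
        return 0;
}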
+/**
+ * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt

+ * @pf: Board private structure
+ *
+ * The device PHY issues Tx timestamp interrupts to the driver for processing
+ * timestamp data from the PHY. It will not interrupt again until all
+ * current timestamp data is read. In rare circumstances, it is possible that
+ * the driver fails to read all outstanding data.
+ *
+ * To avoid getting permanently stuck, periodically check if the PHY has
+ * outstanding timestamp data. If so, trigger an interrupt from software to
+ * process this data.
+ */
+static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ bool trigger_oicr = false;
+ unsigned int i;
+
+ if (ice_is_e810(hw))
+ return;
+
+ if (!ice_pf_src_tmr_owned(pf))
+ return;
+
+ for (i = 0; i < ICE_MAX_QUAD; i++) {
+ u64 tstamp_ready;
+ int err;
+
+ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+ if (!err && tstamp_ready) {
+ trigger_oicr = true;
+ break;
+ }
+ }
+
+ if (trigger_oicr) {
+ /* Trigger a software interrupt, to ensure this data
+ * gets processed.
+ */
+ dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
+
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+}
+
static void ice_ptp_periodic_work(struct kthread_work *work)
{
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
@@ -2451,6 +2621,8 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
err = ice_ptp_update_cached_phctime(pf);
+ ice_ptp_maybe_trigger_tx_interrupt(pf);
+
/* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
msecs_to_jiffies(err ? 10 : 500));
@@ -2468,12 +2640,10 @@ void ice_ptp_reset(struct ice_pf *pf)
int err, itr = 1;
u64 time_diff;
- if (test_bit(ICE_PFR_REQ, pf->state))
+ if (test_bit(ICE_PFR_REQ, pf->state) ||
+ !ice_pf_src_tmr_owned(pf))
goto pfr;
- if (!ice_pf_src_tmr_owned(pf))
- goto reset_ts;
-
err = ice_ptp_init_phc(hw);
if (err)
goto err;
@@ -2517,10 +2687,6 @@ void ice_ptp_reset(struct ice_pf *pf)
goto err;
}
-reset_ts:
- /* Restart the PHY timestamping block */
- ice_ptp_reset_phy_timestamping(pf);
-
pfr:
/* Init Tx structures */
if (ice_is_e810(&pf->hw)) {
@@ -2528,7 +2694,7 @@ pfr:
} else {
kthread_init_delayed_work(&ptp->port.ov_work,
ice_ptp_wait_for_offsets);
- err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
+ err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx,
ptp->port.port_num);
}
if (err)
@@ -2536,6 +2702,11 @@ pfr:
set_bit(ICE_FLAG_PTP, pf->flags);
+ /* Restart the PHY timestamping block */
+ if (!test_bit(ICE_PFR_REQ, pf->state) &&
+ ice_pf_src_tmr_owned(pf))
+ ice_ptp_restart_all_phy(pf);
+
/* Start periodic work going */
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
@@ -2692,6 +2863,8 @@ static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
ice_get_ptp_src_clock_index(&pf->hw));
+ if (!name)
+ return -ENOMEM;
aux_driver->name = name;
aux_driver->shutdown = ice_ptp_auxbus_shutdown;
@@ -2896,11 +3069,11 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
- return ice_ptp_init_tx_e822(pf, &ptp_port->tx,
+ return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
return -ENODEV;
@@ -2938,6 +3111,8 @@ static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
ice_get_ptp_src_clock_index(&pf->hw));
+ if (!name)
+ return -ENOMEM;
aux_dev->name = name;
aux_dev->id = id;
@@ -2987,7 +3162,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
switch (pf->hw.phy_model) {
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 06a330867fc9..087dd32d8762 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -131,6 +131,7 @@ enum ice_tx_tstamp_work {
* @calibrating: if true, the PHY is calibrating the Tx offset. During this
* window, timestamps are temporarily disabled.
* @verify_cached: if true, verify new timestamp differs from last read value
+ * @last_ll_ts_idx_read: index of the last LL TS read by the FW
*/
struct ice_ptp_tx {
spinlock_t lock; /* lock protecting in_use bitmap */
@@ -143,11 +144,12 @@ struct ice_ptp_tx {
u8 init : 1;
u8 calibrating : 1;
u8 verify_cached : 1;
+ s8 last_ll_ts_idx_read;
};
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
-#define INDEX_PER_PORT_E822 16
+#define INDEX_PER_PORT_E82X 16
#define INDEX_PER_PORT_E810 64
/**
@@ -296,11 +298,12 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
+void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx);
void ice_ptp_reset(struct ice_pf *pf);
void ice_ptp_prepare_for_reset(struct ice_pf *pf);
void ice_ptp_init(struct ice_pf *pf);
@@ -325,13 +328,23 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return -1;
}
+static inline void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
+{ }
+
+static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { }
+
static inline bool ice_ptp_process_ts(struct ice_pf *pf)
{
return true;
}
-static inline void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+
+static inline u64
+ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx)
+{
+ return 0;
+}
+
static inline void ice_ptp_reset(struct ice_pf *pf) { }
static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 4109aa3b2fcd..2c4dab0c48ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,17 +9,17 @@
*/
/* Constants defined for the PTP 1588 clock hardware. */
-/* struct ice_time_ref_info_e822
+/* struct ice_time_ref_info_e82x
*
* E822 hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
* This lookup table defines several constants that depend on the current time
- * reference. See the struct ice_time_ref_info_e822 for information about the
+ * reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
@@ -81,7 +81,7 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
},
};
-const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* refclk_pre_div */
@@ -155,7 +155,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
},
};
-/* struct ice_vernier_info_e822
+/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
* actual packet transmission or reception during the initialization of the
@@ -168,7 +168,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
* used by this link speed, and that the register should be cleared by writing
* 0. Other values specify the clock frequency in Hz.
*/
-const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* ICE_PTP_LNK_SPD_1G */
{
/* tx_par_clk */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index a00b55e14aac..187ce9b54e1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -284,19 +284,19 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
- * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * ice_fill_phy_msg_e82x - Fill message data for a PHY register access
* @msg: the PHY message buffer to fill in
* @port: the port to access
* @offset: the register offset
*/
static void
-ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY_E822;
- phy = port / ICE_PORTS_PER_PHY_E822;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822;
+ phy_port = port % ICE_PORTS_PER_PHY_E82X;
+ phy = port / ICE_PORTS_PER_PHY_E82X;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -315,7 +315,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
}
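For illustration, a worked example of the port-to-PHY/quad mapping computed in ice_fill_phy_msg_e82x() above. The constants used here (8 ports per PHY, 4 ports per quad, 2 quads per PHY) are assumptions for the sketch, not quoted from ice_ptp_hw.h.

#include <stdio.h>

#define EX_PORTS_PER_PHY	8
#define EX_PORTS_PER_QUAD	4
#define EX_QUADS_PER_PHY	2

int main(void)
{
        int port;

        for (port = 0; port < 16; port++)
                printf("port %2d -> phy %d, phy_port %d, quadtype %d\n",
                       port, port / EX_PORTS_PER_PHY, port % EX_PORTS_PER_PHY,
                       (port / EX_PORTS_PER_QUAD) % EX_QUADS_PER_PHY);
        return 0;
}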
/**
- * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
*
@@ -323,7 +323,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
* represented as two 32bit registers. If it is, return the appropriate high
* register offset to use.
*/
-static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
{
switch (low_addr) {
case P_REG_PAR_PCS_TX_OFFSET_L:
@@ -368,7 +368,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
}
/**
- * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 40bit value
*
@@ -377,7 +377,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
* upper 32 bits in the high register. If it is, return the appropriate high
* register offset to use.
*/
-static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
{
switch (low_addr) {
case P_REG_TIMETUS_L:
@@ -413,7 +413,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
}
/**
- * ice_read_phy_reg_e822 - Read a PHY register
+ * ice_read_phy_reg_e82x - Read a PHY register
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @offset: PHY register offset to read
@@ -422,12 +422,12 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
* Read a PHY register for the given port over the device sideband queue.
*/
static int
-ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e822(&msg, port, offset);
+ ice_fill_phy_msg_e82x(&msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
err = ice_sbq_rw_reg(hw, &msg);
@@ -443,7 +443,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
}
/**
- * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @low_addr: offset of the lower register to read from
@@ -455,7 +455,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
* known to be two parts of a 64bit value.
*/
static int
-ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
{
u32 low, high;
u16 high_addr;
@@ -464,20 +464,20 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
/* Only operate on registers known to be split into two 32bit
* registers.
*/
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
low_addr);
return -EINVAL;
}
- err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ err = ice_read_phy_reg_e82x(hw, port, low_addr, &low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+ err = ice_read_phy_reg_e82x(hw, port, high_addr, &high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
high_addr, err);
@@ -490,7 +490,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
}
/**
- * ice_write_phy_reg_e822 - Write a PHY register
+ * ice_write_phy_reg_e82x - Write a PHY register
* @hw: pointer to the HW struct
* @port: PHY port to write to
* @offset: PHY register offset to write
@@ -499,12 +499,12 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
* Write a PHY register for the given port over the device sideband queue.
*/
static int
-ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e822(&msg, port, offset);
+ ice_fill_phy_msg_e82x(&msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
@@ -519,7 +519,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
}
/**
- * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY
* @hw: pointer to the HW struct
* @port: port to write to
* @low_addr: offset of the low register
@@ -529,7 +529,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
* it up into two chunks, the lower 8 bits and the upper 32 bits.
*/
static int
-ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
u32 low, high;
u16 high_addr;
@@ -538,7 +538,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/* Only operate on registers known to be split into a lower 8 bit
* register and an upper 32 bit register.
*/
- if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
low_addr);
return -EINVAL;
@@ -547,14 +547,14 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low = (u32)(val & P_REG_40B_LOW_M);
high = (u32)(val >> P_REG_40B_HIGH_S);
- err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
high_addr, err);
@@ -565,7 +565,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
}
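A minimal standalone sketch of the 40-bit split described above (lower 8 bits to one register, upper 32 bits to the other). The 0xff mask and shift of 8 are assumptions consistent with that description, not copies of the P_REG_40B_* definitions.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t val = 0x123456789aULL;			/* a 40-bit value */
        uint32_t low = (uint32_t)(val & 0xff);		/* lower 8 bits */
        uint32_t high = (uint32_t)(val >> 8);		/* upper 32 bits */

        printf("low=0x%02x high=0x%08x recombined=0x%010llx\n",
               low, high, (unsigned long long)(((uint64_t)high << 8) | low));
        return 0;
}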
/**
- * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @low_addr: offset of the lower register to read from
@@ -577,7 +577,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* a 64bit value.
*/
static int
-ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
u32 low, high;
u16 high_addr;
@@ -586,7 +586,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/* Only operate on registers known to be split into two 32bit
* registers.
*/
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
low_addr);
return -EINVAL;
@@ -595,14 +595,14 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low = lower_32_bits(val);
high = upper_32_bits(val);
- err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
high_addr, err);
@@ -613,7 +613,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
}
/**
- * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * ice_fill_quad_msg_e82x - Fill message data for quad register access
* @msg: the PHY message buffer to fill in
* @quad: the quad to access
* @offset: the register offset
@@ -622,7 +622,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* multiple PHYs.
*/
static int
-ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
{
u32 addr;
@@ -631,7 +631,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
msg->dest_dev = rmn_0;
- if ((quad % ICE_QUADS_PER_PHY_E822) == 0)
+ if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
@@ -643,7 +643,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
}
/**
- * ice_read_quad_reg_e822 - Read a PHY quad register
+ * ice_read_quad_reg_e82x - Read a PHY quad register
* @hw: pointer to the HW struct
* @quad: quad to read from
* @offset: quad register offset to read
@@ -653,12 +653,12 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
* shared between multiple PHYs.
*/
int
-ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(&msg, quad, offset);
if (err)
return err;
@@ -677,7 +677,7 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
}
/**
- * ice_write_quad_reg_e822 - Write a PHY quad register
+ * ice_write_quad_reg_e82x - Write a PHY quad register
* @hw: pointer to the HW struct
* @quad: quad to write to
* @offset: quad register offset to write
@@ -687,12 +687,12 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
* shared between multiple PHYs.
*/
int
-ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(&msg, quad, offset);
if (err)
return err;
@@ -710,7 +710,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
}
/**
- * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
* @idx: the timestamp index to read
@@ -721,7 +721,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
* family of devices.
*/
static int
-ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
{
u16 lo_addr, hi_addr;
u32 lo, hi;
@@ -730,14 +730,14 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
- err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
err);
return err;
}
- err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
err);
@@ -754,7 +754,7 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
}
/**
- * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
* @idx: the timestamp index to reset
@@ -770,18 +770,18 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
*
* To directly clear the contents of the timestamp block entirely, discarding
* all timestamp data at once, software should instead use
- * ice_ptp_reset_ts_memory_quad_e822().
+ * ice_ptp_reset_ts_memory_quad_e82x().
*
* This function should only be called on an idx whose bit is set according to
* ice_get_phy_tx_tstamp_ready().
*/
static int
-ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx)
{
u64 unused_tstamp;
int err;
- err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp);
+ err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
quad, idx, err);
@@ -792,33 +792,33 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
}
/**
- * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block
+ * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
*
* Clear all timestamps from the PHY quad block that is shared between the
* internal PHYs on the E822 devices.
*/
-void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad)
+void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad)
{
- ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
- ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
}
/**
- * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks
+ * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks
* @hw: pointer to the HW struct
*/
-static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
+static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
{
unsigned int quad;
for (quad = 0; quad < ICE_MAX_QUAD; quad++)
- ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
}
/**
- * ice_read_cgu_reg_e822 - Read a CGU register
+ * ice_read_cgu_reg_e82x - Read a CGU register
* @hw: pointer to the HW struct
* @addr: Register address to read
* @val: storage for register value read
@@ -827,7 +827,7 @@ static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
* applicable to E822 devices.
*/
static int
-ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg;
int err;
@@ -850,7 +850,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
}
/**
- * ice_write_cgu_reg_e822 - Write a CGU register
+ * ice_write_cgu_reg_e82x - Write a CGU register
* @hw: pointer to the HW struct
* @addr: Register address to write
* @val: value to write into the register
@@ -859,7 +859,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
* applicable to E822 devices.
*/
static int
-ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg;
int err;
@@ -925,7 +925,7 @@ static const char *ice_clk_src_str(u8 clk_src)
}
/**
- * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
* @hw: pointer to the HW struct
* @clk_freq: Clock frequency to program
* @clk_src: Clock source to select (TIME_REF, or TCX0)
@@ -934,7 +934,7 @@ static const char *ice_clk_src_str(u8 clk_src)
* time reference, enabling the PLL which drives the PTP hardware clock.
*/
static int
-ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
enum ice_clk_src clk_src)
{
union tspll_ro_bwm_lf bwm_lf;
@@ -963,15 +963,15 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
return -EINVAL;
}
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
if (err)
return err;
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
if (err)
return err;
- err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
@@ -986,43 +986,43 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
if (dw24.field.ts_pll_enable) {
dw24.field.ts_pll_enable = 0;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
}
/* Set the frequency */
dw9.field.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
if (err)
return err;
/* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
if (err)
return err;
dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
dw19.field.tspll_ndivratio = 1;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
if (err)
return err;
/* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
if (err)
return err;
dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
dw22.field.time1588clk_sel_div2 = 0;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
if (err)
return err;
/* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
if (err)
return err;
@@ -1030,21 +1030,21 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
dw24.field.time_ref_sel = clk_src;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
/* Finally, enable the PLL */
dw24.field.ts_pll_enable = 1;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
/* Wait to verify if the PLL locks */
usleep_range(1000, 5000);
- err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
@@ -1064,18 +1064,18 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
}
/**
- * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * ice_init_cgu_e82x - Initialize CGU with settings from firmware
* @hw: pointer to the HW structure
*
* Initialize the Clock Generation Unit of the E822 device.
*/
-static int ice_init_cgu_e822(struct ice_hw *hw)
+static int ice_init_cgu_e82x(struct ice_hw *hw)
{
struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
union tspll_cntr_bist_settings cntr_bist;
int err;
- err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
&cntr_bist.val);
if (err)
return err;
@@ -1084,7 +1084,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw)
cntr_bist.field.i_plllock_sel_0 = 0;
cntr_bist.field.i_plllock_sel_1 = 0;
- err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
cntr_bist.val);
if (err)
return err;
@@ -1092,7 +1092,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
if (err)
return err;
@@ -1113,7 +1113,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
PTP_VERNIER_WL);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
@@ -1126,12 +1126,12 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization
* @hw: pointer to HW struct
*
* Perform PHC initialization steps specific to E822 devices.
*/
-static int ice_ptp_init_phc_e822(struct ice_hw *hw)
+static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
int err;
u32 regval;
@@ -1145,7 +1145,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
wr32(hw, PF_SB_REM_DEV_CTL, regval);
/* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e822(hw);
+ err = ice_init_cgu_e82x(hw);
if (err)
return err;
@@ -1154,7 +1154,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
}
/**
- * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time
* @hw: pointer to the HW struct
* @time: Time to initialize the PHY port clocks to
*
@@ -1164,7 +1164,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
* units of nominal nanoseconds.
*/
static int
-ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
{
u64 phy_time;
u8 port;
@@ -1177,14 +1177,14 @@ ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
/* Tx case */
- err = ice_write_64b_phy_reg_e822(hw, port,
+ err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_TX_TIMER_INC_PRE_L,
phy_time);
if (err)
goto exit_err;
/* Rx case */
- err = ice_write_64b_phy_reg_e822(hw, port,
+ err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_RX_TIMER_INC_PRE_L,
phy_time);
if (err)
@@ -1201,7 +1201,7 @@ exit_err:
}
/**
- * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust
* @hw: pointer to HW struct
* @port: Port number to be programmed
* @time: time in cycles to adjust the port Tx and Rx clocks
@@ -1216,7 +1216,7 @@ exit_err:
* Negative adjustments are supported using 2s complement arithmetic.
*/
static int
-ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
+ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time)
{
u32 l_time, u_time;
int err;
@@ -1225,23 +1225,23 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
u_time = upper_32_bits(time);
/* Tx case */
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L,
l_time);
if (err)
goto exit_err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U,
u_time);
if (err)
goto exit_err;
/* Rx case */
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L,
l_time);
if (err)
goto exit_err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U,
u_time);
if (err)
goto exit_err;
@@ -1255,7 +1255,7 @@ exit_err:
}
/**
- * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment in nanoseconds
*
@@ -1264,7 +1264,7 @@ exit_err:
* ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
*/
static int
-ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
+ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
{
s64 cycles;
u8 port;
@@ -1281,7 +1281,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
+ err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
if (err)
return err;
}
@@ -1290,7 +1290,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
}
/**
- * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
+ * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment
* @hw: pointer to HW struct
* @incval: new increment value to prepare
*
@@ -1299,13 +1299,13 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
* issuing an ICE_PTP_INIT_INCVAL command.
*/
static int
-ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
{
int err;
u8 port;
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
incval);
if (err)
goto exit_err;
@@ -1337,7 +1337,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
int err;
/* Tx case */
- err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+ err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
err);
@@ -1348,7 +1348,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
(unsigned long long)*tx_ts);
/* Rx case */
- err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+ err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
err);
@@ -1362,7 +1362,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
}
/**
- * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
+ * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command
* @hw: pointer to HW struct
* @port: Port to which cmd has to be sent
* @cmd: Command to be sent to the port
@@ -1372,8 +1372,8 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
* Do not use this function directly. If you want to configure exactly one
* port, use ice_ptp_one_port_cmd() instead.
*/
-static int
-ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
u8 tmr_idx;
@@ -1403,7 +1403,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
/* Tx case */
/* Read, modify, write */
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
err);
@@ -1414,7 +1414,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
val &= ~TS_CMD_MASK;
val |= cmd_val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
err);
@@ -1423,7 +1423,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
/* Rx case */
/* Read, modify, write */
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
err);
@@ -1434,7 +1434,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
val &= ~TS_CMD_MASK;
val |= cmd_val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
err);
@@ -1469,7 +1469,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
else
cmd = ICE_PTP_NOP;
- err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
if (err)
return err;
}
@@ -1478,7 +1478,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
}
/**
- * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
* @hw: pointer to the HW struct
* @cmd: timer command to prepare
*
@@ -1486,14 +1486,14 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
* command.
*/
static int
-ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u8 port;
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
if (err)
return err;
}
@@ -1509,7 +1509,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
/**
- * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode
* @hw: pointer to HW struct
* @port: the port to read from
* @link_out: if non-NULL, holds link speed on success
@@ -1519,7 +1519,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
* algorithm.
*/
static int
-ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_link_spd *link_out,
enum ice_ptp_fec_mode *fec_out)
{
@@ -1528,7 +1528,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
u32 serdes;
int err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
return err;
@@ -1585,18 +1585,18 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
}
/**
- * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp
* @hw: pointer to HW struct
* @port: to configure the quad for
*/
-static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
int err;
u32 val;
u8 quad;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
err);
@@ -1605,7 +1605,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
quad = port / ICE_PORTS_PER_QUAD;
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
err);
@@ -1617,7 +1617,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
else
val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
- err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+ err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
err);
@@ -1626,7 +1626,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822
* @hw: pointer to the HW structure
* @port: the port to configure
*
@@ -1671,12 +1671,12 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
* a divide by 390,625,000. This does lose some precision, but avoids
* miscalculation due to arithmetic overflow.
*/
-static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port)
{
u64 cur_freq, clk_incval, tu_per_sec, uix;
int err;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second divided by 256 */
@@ -1688,7 +1688,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
/* Program the 10Gb/40Gb conversion ratio */
uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L,
uix);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
@@ -1699,7 +1699,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
/* Program the 25Gb/100Gb conversion ratio */
uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L,
uix);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
@@ -1711,7 +1711,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
}
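To make the arithmetic above concrete, here is a standalone sketch of the UI-to-TU conversion: TUs per second are computed pre-divided by 256 so that multiplying by the UI constant and dividing by 390,625,000 stays within 64-bit arithmetic. The PLL frequency, increment value and line_ui numbers below are hypothetical placeholders, not the driver's E82x constants.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t cur_freq = 800000000ULL;	/* hypothetical PLL frequency, Hz */
        uint64_t clk_incval = 0x100000000ULL;	/* hypothetical incval (32.0 TU/cycle) */
        uint64_t line_ui = 66;			/* hypothetical UI constant */
        uint64_t tu_per_sec, uix;

        /* TUs per second, divided by 256 up front to keep later math in 64 bits */
        tu_per_sec = (cur_freq * clk_incval) >> 8;
        uix = (tu_per_sec * line_ui) / 390625000ULL;

        printf("tu_per_sec/256=%llu uix=%llu\n",
               (unsigned long long)tu_per_sec, (unsigned long long)uix);
        return 0;
}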
/**
- * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle
* @hw: pointer to the HW struct
* @port: port to configure
*
@@ -1753,18 +1753,18 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
* frequency is ~29 bits, so multiplying them together should fit within the
* 64 bit arithmetic.
*/
-static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
+static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port)
{
u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
int err;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per cycle of the PHC clock */
@@ -1784,7 +1784,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1796,7 +1796,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1808,7 +1808,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1820,7 +1820,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1832,7 +1832,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1844,7 +1844,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1856,7 +1856,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1868,23 +1868,23 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+ return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L,
phy_tus);
}
/**
- * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port
+ * ice_calc_fixed_tx_offset_e82x - Calculate the fixed Tx offset for a port
* @hw: pointer to the HW struct
* @link_spd: the Link speed to calculate for
*
* Calculate the fixed offset due to known static latency data.
*/
static u64
-ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -1904,7 +1904,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
}
/**
- * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
* @hw: pointer to the HW struct
* @port: the PHY port to configure
*
@@ -1926,7 +1926,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* Returns zero on success, -EBUSY if the hardware vernier offset
* calibration has not completed, or another error code on failure.
*/
-int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
@@ -1935,7 +1935,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
u32 reg;
/* Nothing to do if we've already programmed the offset */
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
port, err);
@@ -1945,7 +1945,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
if (reg)
return 0;
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
port, err);
@@ -1955,11 +1955,11 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
if (!(reg & P_REG_TX_OV_STATUS_OV_M))
return -EBUSY;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+ total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
/* Read the first Vernier offset from the PHY register and add it to
* the total offset.
@@ -1970,7 +1970,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
link_spd == ICE_PTP_LNK_SPD_25G_RS ||
link_spd == ICE_PTP_LNK_SPD_40G ||
link_spd == ICE_PTP_LNK_SPD_50G) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_PCS_TX_OFFSET_L,
&val);
if (err)
@@ -1985,7 +1985,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
*/
if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
link_spd == ICE_PTP_LNK_SPD_100G_RS) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_TX_TIME_L,
&val);
if (err)
@@ -1998,12 +1998,12 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
* PHY and indicate that the Tx offset is ready. After this,
* timestamps will be enabled.
*/
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L,
total_offset);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1);
if (err)
return err;
@@ -2014,7 +2014,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx
* @hw: pointer to the HW struct
* @port: the PHY port to adjust for
* @link_spd: the current link speed of the PHY
@@ -2026,7 +2026,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
* various delays caused when receiving a packet.
*/
static int
-ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_link_spd link_spd,
enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
{
@@ -2035,7 +2035,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u32 val;
int err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
err);
@@ -2044,7 +2044,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
pmd_align = (u8)val;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -2123,7 +2123,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u64 cycle_adj;
u8 rx_cycle;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT,
&val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
@@ -2145,7 +2145,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u64 cycle_adj;
u8 rx_cycle;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT,
&val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
@@ -2172,18 +2172,18 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
}
/**
- * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port
+ * ice_calc_fixed_rx_offset_e82x - Calculated the fixed Rx offset for a port
* @hw: pointer to HW struct
* @link_spd: The Link speed to calculate for
*
* Determine the fixed Rx latency for a given link speed.
*/
static u64
-ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -2203,7 +2203,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
}
/**
- * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
+ * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
* @hw: pointer to the HW struct
* @port: the PHY port to configure
*
@@ -2229,7 +2229,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* Returns zero on success, -EBUSY if the hardware vernier offset
* calibration has not completed, or another error code on failure.
*/
-int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
+int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
@@ -2238,7 +2238,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
u32 reg;
/* Nothing to do if we've already programmed the offset */
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
port, err);
@@ -2248,7 +2248,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
if (reg)
return 0;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
port, err);
@@ -2258,16 +2258,16 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
if (!(reg & P_REG_RX_OV_STATUS_OV_M))
return -EBUSY;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+ total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
/* Read the first Vernier offset from the PHY register and add it to
* the total offset.
*/
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_PCS_RX_OFFSET_L,
&val);
if (err)
@@ -2282,7 +2282,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
link_spd == ICE_PTP_LNK_SPD_50G ||
link_spd == ICE_PTP_LNK_SPD_50G_RS ||
link_spd == ICE_PTP_LNK_SPD_100G_RS) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_RX_TIME_L,
&val);
if (err)
@@ -2292,7 +2292,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
}
/* In addition, Rx must account for the PMD alignment */
- err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
+ err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
if (err)
return err;
@@ -2308,12 +2308,12 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* PHY and indicate that the Rx offset is ready. After this,
* timestamps will be enabled.
*/
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L,
total_offset);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1);
if (err)
return err;
@@ -2324,7 +2324,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
+ * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
* @hw: pointer to the HW struct
* @port: the PHY port to read
* @phy_time: on return, the 64bit PHY timer value
@@ -2334,7 +2334,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* and PHC timer values.
*/
static int
-ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
+ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time,
u64 *phc_time)
{
u64 tx_time, rx_time;
@@ -2381,7 +2381,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
}
/**
- * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
+ * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer
* @hw: pointer to the HW struct
* @port: the PHY port to synchronize
*
@@ -2392,7 +2392,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
* to the PHY timer in order to ensure it reads the same value as the
* primary PHC timer.
*/
-static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port)
{
u64 phc_time, phy_time, difference;
int err;
@@ -2402,7 +2402,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
return -EBUSY;
}
- err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
if (err)
goto err_unlock;
@@ -2416,7 +2416,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
*/
difference = phc_time - phy_time;
- err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
+ err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference);
if (err)
goto err_unlock;
@@ -2433,7 +2433,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
/* Re-capture the timer values to flush the command registers and
* verify that the time was properly adjusted.
*/
- err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
if (err)
goto err_unlock;
@@ -2452,7 +2452,7 @@ err_unlock:
}
/**
- * ice_stop_phy_timer_e822 - Stop the PHY clock timer
+ * ice_stop_phy_timer_e82x - Stop the PHY clock timer
* @hw: pointer to the HW struct
* @port: the PHY port to stop
* @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
@@ -2462,36 +2462,36 @@ err_unlock:
* initialized or when link speed changes.
*/
int
-ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
+ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset)
{
int err;
u32 val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
if (err)
return err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
if (err)
return err;
val &= ~P_REG_PS_START_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val &= ~P_REG_PS_ENA_CLK_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
if (soft_reset) {
val |= P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
}
@@ -2502,7 +2502,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
}
/**
- * ice_start_phy_timer_e822 - Start the PHY clock timer
+ * ice_start_phy_timer_e82x - Start the PHY clock timer
* @hw: pointer to the HW struct
* @port: the PHY port to start
*
@@ -2512,7 +2512,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
*
* Hardware will take Vernier measurements on Tx or Rx of packets.
*/
-int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
+int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port)
{
u32 lo, hi, val;
u64 incval;
@@ -2521,17 +2521,17 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
tmr_idx = ice_get_ptp_src_clock_index(hw);
- err = ice_stop_phy_timer_e822(hw, port, false);
+ err = ice_stop_phy_timer_e82x(hw, port, false);
if (err)
return err;
- ice_phy_cfg_lane_e822(hw, port);
+ ice_phy_cfg_lane_e82x(hw, port);
- err = ice_phy_cfg_uix_e822(hw, port);
+ err = ice_phy_cfg_uix_e82x(hw, port);
if (err)
return err;
- err = ice_phy_cfg_parpcs_e822(hw, port);
+ err = ice_phy_cfg_parpcs_e82x(hw, port);
if (err)
return err;
@@ -2539,7 +2539,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
incval = (u64)hi << 32 | lo;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval);
if (err)
return err;
@@ -2552,22 +2552,22 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
ice_ptp_exec_tmr_cmd(hw);
- err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
if (err)
return err;
val |= P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val |= P_REG_PS_START_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val &= ~P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
@@ -2578,18 +2578,18 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
ice_ptp_exec_tmr_cmd(hw);
val |= P_REG_PS_ENA_CLK_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val |= P_REG_PS_LOAD_OFFSET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
ice_ptp_exec_tmr_cmd(hw);
- err = ice_sync_phy_timer_e822(hw, port);
+ err = ice_sync_phy_timer_e82x(hw, port);
if (err)
return err;
@@ -2599,7 +2599,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register
+ * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register
* @hw: pointer to the HW struct
* @quad: the timestamp quad to read from
* @tstamp_ready: contents of the Tx memory status register
@@ -2609,19 +2609,19 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
* ready to be captured from the PHY timestamp block.
*/
static int
-ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
+ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
{
u32 hi, lo;
int err;
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
quad, err);
return err;
}
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
quad, err);
@@ -3307,7 +3307,7 @@ void ice_ptp_init_phy_model(struct ice_hw *hw)
if (ice_is_e810(hw))
hw->phy_model = ICE_PHY_E810;
else
- hw->phy_model = ICE_PHY_E822;
+ hw->phy_model = ICE_PHY_E82X;
}
/**
@@ -3332,8 +3332,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
case ICE_PHY_E810:
err = ice_ptp_port_cmd_e810(hw, cmd);
break;
- case ICE_PHY_E822:
- err = ice_ptp_port_cmd_e822(hw, cmd);
+ case ICE_PHY_E82X:
+ err = ice_ptp_port_cmd_e82x(hw, cmd);
break;
default:
err = -EOPNOTSUPP;
@@ -3384,8 +3384,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
default:
err = -EOPNOTSUPP;
@@ -3426,8 +3426,8 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
default:
err = -EOPNOTSUPP;
@@ -3492,8 +3492,8 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
default:
err = -EOPNOTSUPP;
@@ -3521,8 +3521,8 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E822:
- return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ case ICE_PHY_E82X:
+ return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -3549,8 +3549,8 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E822:
- return ice_clear_phy_tstamp_e822(hw, block, idx);
+ case ICE_PHY_E82X:
+ return ice_clear_phy_tstamp_e82x(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -3608,8 +3608,8 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
switch (hw->phy_model) {
- case ICE_PHY_E822:
- ice_ptp_reset_ts_memory_e822(hw);
+ case ICE_PHY_E82X:
+ ice_ptp_reset_ts_memory_e82x(hw);
break;
case ICE_PHY_E810:
default:
@@ -3636,8 +3636,8 @@ int ice_ptp_init_phc(struct ice_hw *hw)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E822:
- return ice_ptp_init_phc_e822(hw);
+ case ICE_PHY_E82X:
+ return ice_ptp_init_phc_e82x(hw);
default:
return -EOPNOTSUPP;
}
@@ -3660,8 +3660,8 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E822:
- return ice_get_phy_tx_tstamp_ready_e822(hw, block,
+ case ICE_PHY_E82X:
+ return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
break;
default:
@@ -3942,7 +3942,7 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
case ICE_DEV_ID_E823C_QSFP:
case ICE_DEV_ID_E823C_SFP:
case ICE_DEV_ID_E823C_SGMII:
- *pin_num = ICE_E822_RCLK_PINS_NUM;
+ *pin_num = ICE_E82X_RCLK_PINS_NUM;
ret = 0;
if (hw->cgu_part_number ==
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
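Note: the ice_ptp_hw.c hunks above are a mechanical rename from the _e822 suffix to _e82x, and the PTP entry points now dispatch on ICE_PHY_E82X instead of ICE_PHY_E822. Below is a small, standalone sketch of that dispatch pattern; the enum values mirror the diff, while the stub handlers and main() are purely illustrative and are not driver code.

/* Illustrative sketch only: a standalone model of the phy_model dispatch
 * used above. port_cmd_e810()/port_cmd_e82x() are hypothetical stand-ins
 * for the driver's ice_ptp_port_cmd_e810()/ice_ptp_port_cmd_e82x().
 */
#include <errno.h>
#include <stdio.h>

enum ice_phy_model { ICE_PHY_UNSUP, ICE_PHY_E810, ICE_PHY_E82X };

static int port_cmd_e810(void) { return 0; }	/* stand-in */
static int port_cmd_e82x(void) { return 0; }	/* stand-in */

static int ptp_port_cmd(enum ice_phy_model model)
{
	switch (model) {
	case ICE_PHY_E810:
		return port_cmd_e810();
	case ICE_PHY_E82X:
		/* one implementation shared by the E82x family */
		return port_cmd_e82x();
	default:
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	printf("e82x cmd -> %d\n", ptp_port_cmd(ICE_PHY_E82X));
	return 0;
}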
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index cf76701566c7..1f3e03124430 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -42,7 +42,7 @@ enum ice_ptp_fec_mode {
};
/**
- * struct ice_time_ref_info_e822
+ * struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
* @pps_delay: propagation delay of the PPS output signal
@@ -50,14 +50,14 @@ enum ice_ptp_fec_mode {
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
*/
-struct ice_time_ref_info_e822 {
+struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
u8 pps_delay;
};
/**
- * struct ice_vernier_info_e822
+ * struct ice_vernier_info_e82x
* @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
* @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
* @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
@@ -80,7 +80,7 @@ struct ice_time_ref_info_e822 {
* different link speeds, either the deskew marker for multi-lane link speeds
* or the Reed Solomon gearbox marker for RS-FEC.
*/
-struct ice_vernier_info_e822 {
+struct ice_vernier_info_e82x {
u32 tx_par_clk;
u32 rx_par_clk;
u32 tx_pcs_clk;
@@ -95,7 +95,7 @@ struct ice_vernier_info_e822 {
};
/**
- * struct ice_cgu_pll_params_e822
+ * struct ice_cgu_pll_params_e82x
* @refclk_pre_div: Reference clock pre-divisor
* @feedback_div: Feedback divisor
* @frac_n_div: Fractional divisor
@@ -104,7 +104,7 @@ struct ice_vernier_info_e822 {
* Clock Generation Unit parameters used to program the PLL based on the
* selected TIME_REF frequency.
*/
-struct ice_cgu_pll_params_e822 {
+struct ice_cgu_pll_params_e82x {
u32 refclk_pre_div;
u32 feedback_div;
u32 frac_n_div;
@@ -124,7 +124,7 @@ enum ice_phy_rclk_pins {
};
#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1)
-#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
+#define ICE_E82X_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \
(_pin) + ZL_REF1P)
@@ -183,16 +183,16 @@ struct ice_cgu_pin_desc {
};
extern const struct
-ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
/* Table of constants for Vernier calibration on E822 */
-extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
+extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
@@ -215,23 +215,23 @@ int ice_ptp_init_phc(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
/* E822 family functions */
-int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
-int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
-void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
+int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+int ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
/**
- * ice_e822_time_ref - Get the current TIME_REF from capabilities
+ * ice_e82x_time_ref - Get the current TIME_REF from capabilities
* @hw: pointer to the HW structure
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
+static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
/**
- * ice_set_e822_time_ref - Set new TIME_REF
+ * ice_set_e82x_time_ref - Set new TIME_REF
* @hw: pointer to the HW structure
* @time_ref: new TIME_REF to set
*
@@ -239,31 +239,31 @@ static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].pps_delay;
}
/* E822 Vernier calibration functions */
-int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset);
-int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port);
+int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
@@ -509,6 +509,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define TS_LL_READ_RETRIES 200
#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
#define TS_LL_READ_TS_IDX GENMASK(29, 24)
+#define TS_LL_READ_TS_INTR BIT(30)
#define TS_LL_READ_TS BIT(31)
/* Internal PHY timestamp address */
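Note: several of the renamed helpers above (ice_calc_fixed_tx_offset_e82x, ice_calc_fixed_rx_offset_e82x, ice_phy_calc_pmd_adj_e82x) start from the same "Calculate TUs per second" step, combining the TIME_REF PLL frequency with the source timer increment value. The standalone sketch below only illustrates that conversion in rough form; the frequency, increment and latency constants are invented and the driver's exact scaling may differ.

/* Rough, self-contained sketch of the "TUs per second" idea referenced by
 * the comments in the hunks above. All constants below are made up, not
 * values from the e822 lookup tables.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pll_freq   = 812500000ULL;	/* Hz, hypothetical TIME_REF PLL */
	uint64_t clk_incval = 0x13b13b13bULL;	/* hypothetical GLTSYN increment */
	uint64_t delay_hundredths_ns = 25000;	/* 250.00 ns, made up latency    */

	/* Time Units generated per second */
	uint64_t tu_per_sec = pll_freq * clk_incval;

	/* Convert a latency given in 1/100 ns into TUs; splitting the divide
	 * keeps the intermediate product inside 64 bits.
	 */
	uint64_t offset = tu_per_sec / 10000;

	offset *= delay_hundredths_ns;
	offset /= 10000000;

	printf("fixed offset = %llu TUs\n", (unsigned long long)offset);
	return 0;
}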
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index c686ac0935eb..5f30fb131f74 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -14,7 +14,7 @@
*/
static int ice_repr_get_sw_port_id(struct ice_repr *repr)
{
- return repr->vf->pf->hw.port_info->lport;
+ return repr->src_vsi->back->hw.port_info->lport;
}
/**
@@ -35,7 +35,7 @@ ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
return -EOPNOTSUPP;
res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
- repr->vf->vf_id);
+ repr->id);
if (res <= 0)
return -EOPNOTSUPP;
return 0;
@@ -278,25 +278,67 @@ ice_repr_reg_netdev(struct net_device *netdev)
return register_netdev(netdev);
}
+static void ice_repr_remove_node(struct devlink_port *devlink_port)
+{
+ devl_lock(devlink_port->devlink);
+ devl_rate_leaf_destroy(devlink_port);
+ devl_unlock(devlink_port->devlink);
+}
+
/**
- * ice_repr_add - add representor for VF
- * @vf: pointer to VF structure
+ * ice_repr_rem - remove representor from VF
+ * @repr: pointer to representor structure
*/
-static int ice_repr_add(struct ice_vf *vf)
+static void ice_repr_rem(struct ice_repr *repr)
+{
+ kfree(repr->q_vector);
+ free_netdev(repr->netdev);
+ kfree(repr);
+}
+
+/**
+ * ice_repr_rem_vf - remove representor from VF
+ * @repr: pointer to representor structure
+ */
+void ice_repr_rem_vf(struct ice_repr *repr)
+{
+ ice_repr_remove_node(&repr->vf->devlink_port);
+ unregister_netdev(repr->netdev);
+ ice_devlink_destroy_vf_port(repr->vf);
+ ice_virtchnl_set_dflt_ops(repr->vf);
+ ice_repr_rem(repr);
+}
+
+static void ice_repr_set_tx_topology(struct ice_pf *pf)
+{
+ struct devlink *devlink;
+
+ /* only export if ADQ and DCB disabled and eswitch enabled */
+ if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
+ !ice_is_switchdev_running(pf))
+ return;
+
+ devlink = priv_to_devlink(pf);
+ ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
+}
+
+/**
+ * ice_repr_add - add representor for generic VSI
+ * @pf: pointer to PF structure
+ * @src_vsi: pointer to VSI structure of device to represent
+ * @parent_mac: device MAC address
+ */
+static struct ice_repr *
+ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
- struct ice_vsi *vsi;
int err;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
repr = kzalloc(sizeof(*repr), GFP_KERNEL);
if (!repr)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
@@ -304,9 +346,7 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_alloc;
}
- repr->src_vsi = vsi;
- repr->vf = vf;
- vf->repr = repr;
+ repr->src_vsi = src_vsi;
np = netdev_priv(repr->netdev);
np->repr = repr;
@@ -316,10 +356,40 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_alloc_q_vector;
}
repr->q_vector = q_vector;
+ repr->q_id = repr->id;
+
+ ether_addr_copy(repr->parent_mac, parent_mac);
+
+ return repr;
+
+err_alloc_q_vector:
+ free_netdev(repr->netdev);
+err_alloc:
+ kfree(repr);
+ return ERR_PTR(err);
+}
+
+struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+{
+ struct ice_repr *repr;
+ struct ice_vsi *vsi;
+ int err;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return ERR_PTR(-ENOENT);
err = ice_devlink_create_vf_port(vf);
if (err)
- goto err_devlink;
+ return ERR_PTR(err);
+
+ repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
+ if (IS_ERR(repr)) {
+ err = PTR_ERR(repr);
+ goto err_repr_add;
+ }
+
+ repr->vf = vf;
repr->netdev->min_mtu = ETH_MIN_MTU;
repr->netdev->max_mtu = ICE_MAX_MTU;
@@ -331,100 +401,23 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_netdev;
ice_virtchnl_set_repr_ops(vf);
+ ice_repr_set_tx_topology(vf->pf);
- return 0;
+ return repr;
err_netdev:
+ ice_repr_rem(repr);
+err_repr_add:
ice_devlink_destroy_vf_port(vf);
-err_devlink:
- kfree(repr->q_vector);
- vf->repr->q_vector = NULL;
-err_alloc_q_vector:
- free_netdev(repr->netdev);
- repr->netdev = NULL;
-err_alloc:
- kfree(repr);
- vf->repr = NULL;
- return err;
-}
-
-/**
- * ice_repr_rem - remove representor from VF
- * @vf: pointer to VF structure
- */
-static void ice_repr_rem(struct ice_vf *vf)
-{
- if (!vf->repr)
- return;
-
- kfree(vf->repr->q_vector);
- vf->repr->q_vector = NULL;
- unregister_netdev(vf->repr->netdev);
- ice_devlink_destroy_vf_port(vf);
- free_netdev(vf->repr->netdev);
- vf->repr->netdev = NULL;
- kfree(vf->repr);
- vf->repr = NULL;
-
- ice_virtchnl_set_dflt_ops(vf);
-}
-
-/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
- struct devlink *devlink;
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf)
- ice_repr_rem(vf);
-
- /* since all port representors are destroyed, there is
- * no point in keeping the nodes
- */
- devlink = priv_to_devlink(pf);
- devl_lock(devlink);
- devl_rate_nodes_destroy(devlink);
- devl_unlock(devlink);
+ return ERR_PTR(err);
}
-/**
- * ice_repr_add_for_all_vfs - add port representor for all VFs
- * @pf: pointer to PF structure
- */
-int ice_repr_add_for_all_vfs(struct ice_pf *pf)
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
{
- struct devlink *devlink;
- struct ice_vf *vf;
- unsigned int bkt;
- int err;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- err = ice_repr_add(vf);
- if (err)
- goto err;
- }
-
- /* only export if ADQ and DCB disabled */
- if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
- return 0;
-
- devlink = priv_to_devlink(pf);
- ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
-
- return 0;
-
-err:
- ice_repr_rem_from_all_vfs(pf);
+ if (!vsi->vf)
+ return NULL;
- return err;
+ return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
}
/**
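Note: ice_repr_add_vf() above now returns the representor itself and encodes failures as error pointers instead of a plain int. The snippet below is a minimal userspace model of that calling convention; ERR_PTR()/IS_ERR()/PTR_ERR() are re-implemented locally for illustration, and make_repr() is a hypothetical stand-in, not a driver function.

/* Standalone sketch of the error-pointer return convention. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

struct repr { int id; };

static struct repr *make_repr(int id)
{
	struct repr *r = malloc(sizeof(*r));

	if (!r)
		return ERR_PTR(-ENOMEM);	/* failure travels in the pointer */
	r->id = id;
	return r;
}

int main(void)
{
	struct repr *r = make_repr(3);

	if (IS_ERR(r)) {
		fprintf(stderr, "failed: %ld\n", PTR_ERR(r));
		return 1;
	}
	printf("repr %d created\n", r->id);
	free(r);
	return 0;
}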
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index e1ee2d2c1d2d..f9aede315716 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -13,14 +13,17 @@ struct ice_repr {
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
+ int q_id;
+ u32 id;
+ u8 parent_mac[ETH_ALEN];
#ifdef CONFIG_ICE_SWITCHDEV
/* info about slow path rule */
struct ice_rule_query_data sp_rule;
#endif
};
-int ice_repr_add_for_all_vfs(struct ice_pf *pf);
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
+struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
+void ice_repr_rem_vf(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
@@ -29,4 +32,6 @@ void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
+
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2f4a621254e8..d174a4eeb899 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1387,8 +1387,7 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
u32 val, clk_src;
val = rd32(hw, GLGEN_CLKSTAT_SRC);
- clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
- GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
+ clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val);
#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
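Note: this hunk, like the ice_sriov.c, ice_switch.c and ice_txrx.c changes further down, replaces open-coded "(val & MASK) >> SHIFT" and "(val << SHIFT) & MASK" expressions with FIELD_GET()/FIELD_PREP(), so the shift is derived from the mask and the separate _S constants can go away. The standalone program below only models the semantics of the kernel's <linux/bitfield.h> macros; the DEMO_CLK_SRC_M field is made up and __builtin_ctz() stands in for the kernel's mask-shift machinery.

/* Userspace model of FIELD_GET()/FIELD_PREP() semantics, for illustration. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0U << (l)) & (~0U >> (31 - (h))))

#define MASK_SHIFT(m)		(__builtin_ctz(m))	/* lowest set bit of mask */
#define FIELD_GET(m, reg)	(((reg) & (m)) >> MASK_SHIFT(m))
#define FIELD_PREP(m, val)	(((val) << MASK_SHIFT(m)) & (m))

#define DEMO_CLK_SRC_M	GENMASK(4, 3)		/* hypothetical 2-bit field */

int main(void)
{
	uint32_t reg = 0;

	reg |= FIELD_PREP(DEMO_CLK_SRC_M, 0x2);	/* pack value 2 into bits 4:3 */
	printf("reg     = 0x%08x\n", (unsigned int)reg);
	printf("clk_src = %u\n", (unsigned int)FIELD_GET(DEMO_CLK_SRC_M, reg));
	return 0;
}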
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index e1494f24f661..b0f78c2f2790 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -106,10 +106,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
for (v = first; v <= last; v++) {
u32 reg;
- reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
- GLINT_VECT2FUNC_IS_PF_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
+ reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) |
+ FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
@@ -172,13 +170,14 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
- mutex_lock(&vfs->table_lock);
+ ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
- ice_eswitch_release(pf);
+ mutex_lock(&vfs->table_lock);
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
+ ice_eswitch_detach(pf, vf);
ice_dis_vf_qs(vf);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
@@ -274,24 +273,20 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
(device_based_first_msix + vf->num_msix) - 1;
device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
- VPINT_ALLOC_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
- VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+ reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) |
+ FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) |
+ VPINT_ALLOC_VALID_M;
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
- & VPINT_ALLOC_PCI_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
- VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+ reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) |
+ FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) |
+ VPINT_ALLOC_PCI_VALID_M;
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
- reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
- GLINT_VECT2FUNC_VF_NUM_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
+ reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) |
+ FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
@@ -324,10 +319,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
*/
- reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
- VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
- VPLAN_TX_QBASE_VFNUMQ_M));
+ reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
+ FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
} else {
dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
@@ -342,10 +335,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
*/
- reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
- VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
- VPLAN_RX_QBASE_VFNUMQ_M));
+ reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) |
+ FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1);
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
} else {
dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
@@ -609,6 +600,14 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
+ retval = ice_eswitch_attach(pf, vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
+ vf->vf_id, retval);
+ ice_vf_vsi_release(vf);
+ goto teardown;
+ }
+
set_bit(ICE_VF_STATE_INIT, vf->vf_states);
ice_ena_vf_mappings(vf);
wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
@@ -763,24 +762,6 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
}
/**
- * ice_sriov_create_vsi - Create a new VSI for a VF
- * @vf: VF to create the VSI for
- *
- * This is called by ice_vf_recreate_vsi to create the new VSI after the old
- * VSI has been released.
- */
-static int ice_sriov_create_vsi(struct ice_vf *vf)
-{
- struct ice_vsi *vsi;
-
- vsi = ice_vf_vsi_setup(vf);
- if (!vsi)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
* ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
* @vf: VF to perform tasks on
*/
@@ -799,7 +780,6 @@ static const struct ice_vf_ops ice_sriov_vf_ops = {
.poll_reset_status = ice_sriov_poll_reset_status,
.clear_reset_trigger = ice_sriov_clear_reset_trigger,
.irq_close = NULL,
- .create_vsi = ice_sriov_create_vsi,
.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};
@@ -918,6 +898,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
goto err_unroll_sriov;
}
+ ice_eswitch_reserve_cp_queues(pf, num_vfs);
ret = ice_start_vfs(pf);
if (ret) {
dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
@@ -927,12 +908,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
clear_bit(ICE_VF_DIS, pf->state);
- ret = ice_eswitch_configure(pf);
- if (ret) {
- dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
- goto err_unroll_sriov;
- }
-
/* rearm global interrupts */
if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1093,6 +1068,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
struct ice_pf *pf = pci_get_drvdata(pdev);
u16 prev_msix, prev_queues, queues;
bool needs_rebuild = false;
+ struct ice_vsi *vsi;
struct ice_vf *vf;
int id;
@@ -1127,6 +1103,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (!vf)
return -ENOENT;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -ENOENT;
+
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
@@ -1147,8 +1127,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (vf->first_vector_idx < 0)
goto unroll;
- ice_vf_vsi_release(vf);
- if (vf->vf_ops->create_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
/* Try to rebuild with previous values */
needs_rebuild = true;
goto unroll;
@@ -1174,8 +1153,10 @@ unroll:
if (vf->first_vector_idx < 0)
return -EINVAL;
- if (needs_rebuild)
- vf->vf_ops->create_vsi(vf);
+ if (needs_rebuild) {
+ ice_vf_reconfig_vsi(vf);
+ ice_vf_init_host_cfg(vf, vsi);
+ }
ice_ena_vf_mappings(vf);
ice_put_vf(vf);
@@ -1324,8 +1305,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
/* event returns device global Rx queue number */
- queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
- GLDCB_RTCTQ_RXQNUM_S;
+ queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq);
vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
if (!vf)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ee19f3aa3d19..f84bab80ca42 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2492,25 +2492,24 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
switch (f_info->fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ f_info->fwd_id.hw_vsi_id);
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_VSI_LIST:
act |= ICE_SINGLE_ACT_VSI_LIST;
- act |= (f_info->fwd_id.vsi_list_id <<
- ICE_SINGLE_ACT_VSI_LIST_ID_S) &
- ICE_SINGLE_ACT_VSI_LIST_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_LIST_ID_M,
+ f_info->fwd_id.vsi_list_id);
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_Q:
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ f_info->fwd_id.q_id);
break;
case ICE_DROP_PACKET:
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
@@ -2520,10 +2519,9 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
q_rgn = f_info->qgrp_size > 0 ?
(u8)ilog2(f_info->qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
- act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
- ICE_SINGLE_ACT_Q_REGION_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ f_info->fwd_id.q_id);
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
break;
default:
return;
@@ -2649,7 +2647,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
+ act |= FIELD_PREP(ICE_LG_ACT_VSI_LIST_ID_M, id);
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->act[0] = cpu_to_le32(act);
@@ -2657,16 +2655,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* Second action descriptor type */
act = ICE_LG_ACT_GENERIC;
- act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+ act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, 1);
lg_act->act[1] = cpu_to_le32(act);
- act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
- ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
+ act = FIELD_PREP(ICE_LG_ACT_GENERIC_OFFSET_M,
+ ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX);
/* Third action Marker value */
act |= ICE_LG_ACT_GENERIC;
- act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
- ICE_LG_ACT_GENERIC_VALUE_M;
+ act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, sw_marker);
lg_act->act[2] = cpu_to_le32(act);
@@ -2675,9 +2672,9 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_aqc_opc_update_sw_rules);
/* Update the action to point to the large action ID */
- rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
- ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
- ICE_SINGLE_ACT_PTR_VAL_M));
+ act = ICE_SINGLE_ACT_PTR;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_PTR_VAL_M, l_id);
+ rx_tx->act = cpu_to_le32(act);
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
@@ -4426,8 +4423,8 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
int status;
buf->num_elems = cpu_to_le16(num_items);
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) |
+ alloc_shared);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
@@ -4454,8 +4451,8 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
int status;
buf->num_elems = cpu_to_le16(num_items);
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) |
+ alloc_shared);
buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
@@ -4481,18 +4478,15 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
+ u16 res_type;
int status;
buf->num_elems = cpu_to_le16(1);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, type);
if (shared)
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) |
- ICE_AQC_RES_TYPE_FLAG_SHARED);
- else
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) &
- ~ICE_AQC_RES_TYPE_FLAG_SHARED);
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
+ buf->res_type = cpu_to_le16(res_type);
buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
status = ice_aq_alloc_free_res(hw, buf, buf_len,
ice_aqc_opc_share_res);
@@ -5024,8 +5018,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
entry->chain_idx = chain_idx;
content->result_indx =
ICE_AQ_RECIPE_RESULT_EN |
- ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
- ICE_AQ_RECIPE_RESULT_DATA_M);
+ FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
+ chain_idx);
clear_bit(chain_idx, result_idx_bm);
chain_idx = find_first_bit(result_idx_bm,
ICE_MAX_FV_WORDS);
@@ -6065,6 +6059,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
rinfo->sw_act.fltr_act == ICE_NOP)) {
status = -EIO;
goto free_pkt_profile;
@@ -6077,9 +6072,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
- rinfo->sw_act.fltr_act == ICE_NOP)
+ rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_NOP) {
rinfo->sw_act.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
+ }
if (rinfo->src_vsi)
rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
@@ -6115,38 +6112,45 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = -ENOMEM;
goto free_pkt_profile;
}
- if (!rinfo->flags_info.act_valid) {
- act |= ICE_SINGLE_ACT_LAN_ENABLE;
- act |= ICE_SINGLE_ACT_LB_ENABLE;
- } else {
- act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_LB_ENABLE);
+
+ if (rinfo->sw_act.fltr_act != ICE_MIRROR_PACKET) {
+ if (!rinfo->flags_info.act_valid) {
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ } else {
+ act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_LB_ENABLE);
+ }
}
switch (rinfo->sw_act.fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
- ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_Q:
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ rinfo->sw_act.fwd_id.q_id);
break;
case ICE_FWD_TO_QGRP:
q_rgn = rinfo->sw_act.qgrp_size > 0 ?
(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
- act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
- ICE_SINGLE_ACT_Q_REGION_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ rinfo->sw_act.fwd_id.q_id);
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
break;
case ICE_DROP_PACKET:
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
ICE_SINGLE_ACT_VALID_BIT;
break;
+ case ICE_MIRROR_PACKET:
+ act |= ICE_SINGLE_ACT_OTHER_ACTS;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
+ break;
case ICE_NOP:
act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
rinfo->sw_act.fwd_id.hw_vsi_id);
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index dd03cb69ad26..b890410a2bc0 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -653,7 +653,7 @@ static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
ice_tc_is_dev_uplink(target_dev)) {
repr = ice_netdev_to_repr(filter_dev);
- fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+ fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_tc_is_dev_uplink(filter_dev) &&
ice_is_port_repr_netdev(target_dev)) {
@@ -689,6 +689,41 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
return 0;
}
+static int ice_tc_setup_mirror_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct net_device *target_dev)
+{
+ struct ice_repr *repr;
+
+ fltr->action.fltr_act = ICE_MIRROR_PACKET;
+
+ if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_tc_is_dev_uplink(target_dev)) {
+ repr = ice_netdev_to_repr(filter_dev);
+
+ fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_tc_is_dev_uplink(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
struct ice_tc_flower_fltr *fltr,
struct flow_action_entry *act)
@@ -710,6 +745,12 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
break;
+ case FLOW_ACTION_MIRRED:
+ err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev);
+ if (err)
+ return err;
+ break;
+
default:
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
return -EINVAL;
@@ -765,7 +806,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
- fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+ fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
/* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9e97ea863068..97d41d6ebf1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
- if (rx_ring->vsi->type == ICE_VSI_PF &&
- !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
- goto err;
return 0;
err:
@@ -557,13 +552,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
* @rx_buf: Rx buffer to store the XDP action
+ * @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf)
+ struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -571,6 +567,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (!xdp_prog)
goto exit;
+ ice_xdp_meta_set_desc(xdp, eop_desc);
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
@@ -600,9 +598,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- rx_buf->act = ret;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ ice_set_rx_bufs_act(xdp, rx_ring, ret);
}
/**
@@ -890,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
}
if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
return -ENOMEM;
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
+ /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+ * can pop off frags but driver has to handle it on its own
+ */
+ rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@@ -1180,8 +1179,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, ntc);
@@ -1241,7 +1239,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+ ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
if (rx_buf->act == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
@@ -1249,6 +1247,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1264,10 +1263,12 @@ construct_skb:
ICE_XDP_CONSUMED);
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
break;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1276,7 +1277,7 @@ construct_skb:
continue;
}
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
+ vlan_tci = ice_get_vlan_tci(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
if (eth_skb_pad(skb))
@@ -1286,14 +1287,11 @@ construct_skb:
total_rx_bytes += skb->len;
/* populate checksum, VLAN, and protocol */
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
-
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
- ice_receive_skb(rx_ring, skb, vlan_tag);
+ ice_receive_skb(rx_ring, skb, vlan_tci);
/* update budget accounting */
total_rx_pkts++;
@@ -1494,9 +1492,9 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
* be static in non-adaptive mode (user configured)
*/
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
- GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
- GLINT_DYN_CTL_WB_ON_ITR_M);
+ FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) |
+ FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) |
+ FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1));
q_vector->wb_on_itr = true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index daf7b9dbb143..af955b0e5dc5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -257,6 +257,20 @@ enum ice_rx_dtype {
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
+struct ice_pkt_ctx {
+ u64 cached_phctime;
+ __be16 vlan_proto;
+};
+
+struct ice_xdp_buff {
+ struct xdp_buff xdp_buff;
+ const union ice_32b_rx_flex_desc *eop_desc;
+ const struct ice_pkt_ctx *pkt_ctx;
+};
+
+/* Required for compatibility with xdp_buffs from xsk_pool */
+static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
+
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
@@ -298,7 +312,6 @@ enum ice_dynamic_itr {
/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -310,13 +323,24 @@ struct ice_rx_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 next_to_alloc;
- /* CL2 - 2nd cacheline starts here */
+
union {
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
- struct xdp_buff xdp;
+ /* CL2 - 2nd cacheline starts here */
+ union {
+ struct ice_xdp_buff xdp_ext;
+ struct xdp_buff xdp;
+ };
/* CL3 - 3rd cacheline starts here */
+ union {
+ struct ice_pkt_ctx pkt_ctx;
+ struct {
+ u64 cached_phctime;
+ __be16 vlan_proto;
+ };
+ };
struct bpf_prog *xdp_prog;
u16 rx_offset;
@@ -332,9 +356,10 @@ struct ice_rx_ring {
/* CL4 - 4th cacheline starts here */
struct ice_channel *ch;
struct ice_tx_ring *xdp_ring;
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
+ u32 nr_frags;
dma_addr_t dma; /* physical address of ring */
- u64 cached_phctime;
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
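Note: struct ice_xdp_buff above embeds struct xdp_buff as its first member and static_asserts that layout, so a plain xdp_buff pointer (including buffers coming from an xsk_pool) can be reinterpreted as the driver's wrapper and the EOP descriptor / packet context recovered from it. The snippet below is an illustrative, self-contained model of that idiom using stand-in types, not the driver's definitions.

/* Standalone sketch of the "wrapper struct at offset 0" pattern. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct xdp_buff { void *data; };		/* stand-in for the core struct */
struct rx_desc  { unsigned int qword; };	/* stand-in for the descriptor  */

struct demo_xdp_buff {
	struct xdp_buff xdp_buff;		/* must stay the first member   */
	const struct rx_desc *eop_desc;		/* extra, driver-private context */
};

/* Wrapper and core struct must alias so a generic xdp_buff pointer can be
 * treated as the wrapper type.
 */
static_assert(offsetof(struct demo_xdp_buff, xdp_buff) == 0, "layout");

static void use_plain_xdp(struct xdp_buff *xdp)
{
	/* recover the private context from the generic pointer */
	struct demo_xdp_buff *priv =
		container_of(xdp, struct demo_xdp_buff, xdp_buff);

	printf("qword = %u\n", priv->eop_desc->qword);
}

int main(void)
{
	struct rx_desc desc = { .qword = 42 };
	struct demo_xdp_buff buf = { .eop_desc = &desc };

	use_plain_xdp(&buf.xdp_buff);
	return 0;
}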
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 7e06373e14d9..839e5da24ad5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -63,28 +63,42 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
}
/**
- * ice_rx_hash - set the hash value in the skb
+ * ice_get_rx_hash - get RX hash value from descriptor
+ * @rx_desc: specific descriptor
+ *
+ * Returns hash, if present, 0 otherwise.
+ */
+static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *nic_mdid;
+
+ if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC))
+ return 0;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ return le32_to_cpu(nic_mdid->rss_hash);
+}
+
+/**
+ * ice_rx_hash_to_skb - set the hash value in the skb
* @rx_ring: descriptor ring
* @rx_desc: specific descriptor
* @skb: pointer to current skb
* @rx_ptype: the ptype value from the descriptor
*/
static void
-ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 rx_ptype)
+ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u16 rx_ptype)
{
- struct ice_32b_rx_flex_desc_nic *nic_mdid;
u32 hash;
if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
return;
- if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
- return;
-
- nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
- hash = le32_to_cpu(nic_mdid->rss_hash);
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+ hash = ice_get_rx_hash(rx_desc);
+ if (likely(hash))
+ skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
/**
@@ -171,11 +185,38 @@ checksum_fail:
}
/**
+ * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb
+ * @rx_ring: Ring to get the VSI info
+ * @rx_desc: Receive descriptor
+ * @skb: Particular skb to send timestamp with
+ *
+ * The timestamp is in ns, so we must convert the result first.
+ */
+static void
+ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);
+}
+
+/**
+ * ice_get_ptype - Read HW packet type from the descriptor
+ * @rx_desc: RX descriptor
+ */
+static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc)
+{
+ return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+}
+
+/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
@@ -184,9 +225,11 @@ checksum_fail:
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 ptype)
+ struct sk_buff *skb)
{
- ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+ u16 ptype = ice_get_ptype(rx_desc);
+
+ ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -194,28 +237,24 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
if (rx_ring->ptp_rx)
- ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+ ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb);
}
/**
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: Rx ring in play
* @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
+ * @vlan_tci: VLAN TCI for packet
*
* This function sends the completed packet (via. skb) up the stack using
* gro receive functions (with/without VLAN tag)
*/
void
-ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci)
{
- netdev_features_t features = rx_ring->netdev->features;
- bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+ if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto)
+ __vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto,
+ vlan_tci);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -464,3 +503,125 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
spin_unlock(&xdp_ring->tx_lock);
}
}
+
+/**
+ * ice_xdp_rx_hw_ts - HW timestamp XDP hint handler
+ * @ctx: XDP buff pointer
+ * @ts_ns: destination address
+ *
+ * Copy HW timestamp (if available) to the destination address.
+ */
+static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
+ xdp_ext->pkt_ctx);
+ if (!*ts_ns)
+ return -ENODATA;
+
+ return 0;
+}
+
+/* Define a ptype index -> XDP hash type lookup table.
+ * It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
+ * avoiding possible copy-paste errors.
+ */
+#undef ICE_PTT
+#undef ICE_PTT_UNUSED_ENTRY
+
+#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
+
+#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
+
+/* A few supplementary definitions for when XDP hash types do not coincide
+ * with what can be generated from ptype definitions
+ * by means of preprocessor concatenation.
+ */
+#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2
+#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4
+
+static const enum xdp_rss_hash_type
+ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
+ ICE_PTYPES
+};
+
+#undef XDP_RSS_L3_NONE
+#undef XDP_RSS_L4_NONE
+#undef XDP_RSS_TYPE_PAY2
+#undef XDP_RSS_TYPE_PAY3
+#undef XDP_RSS_TYPE_PAY4
+
+#undef ICE_PTT
+#undef ICE_PTT_UNUSED_ENTRY
+
+/**
+ * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
+ * @eop_desc: End of Packet descriptor
+ */
+static enum xdp_rss_hash_type
+ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
+{
+ u16 ptype = ice_get_ptype(eop_desc);
+
+ if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
+ return 0;
+
+ return ice_ptype_to_xdp_hash[ptype];
+}
+
+/**
+ * ice_xdp_rx_hash - RX hash XDP hint handler
+ * @ctx: XDP buff pointer
+ * @hash: hash destination address
+ * @rss_type: XDP hash type destination address
+ *
+ * Copy RX hash (if available) and its type to the destination address.
+ */
+static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *hash = ice_get_rx_hash(xdp_ext->eop_desc);
+ *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
+ if (unlikely(!*hash))
+ return -ENODATA;
+
+ return 0;
+}
+
+/**
+ * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler
+ * @ctx: XDP buff pointer
+ * @vlan_proto: destination address for VLAN protocol
+ * @vlan_tci: destination address for VLAN TCI
+ *
+ * Copy the VLAN tag (if it was stripped) and the corresponding protocol
+ * to the destination addresses.
+ */
+static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
+ if (!*vlan_proto)
+ return -ENODATA;
+
+ *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
+ if (!*vlan_tci)
+ return -ENODATA;
+
+ return 0;
+}
+
+const struct xdp_metadata_ops ice_xdp_md_ops = {
+ .xmo_rx_timestamp = ice_xdp_rx_hw_ts,
+ .xmo_rx_hash = ice_xdp_rx_hash,
+ .xmo_rx_vlan_tag = ice_xdp_rx_vlan_tag,
+};
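The xdp_metadata_ops registered above back the generic bpf_xdp_metadata_rx_*() kfuncs, so an XDP program attached to an ice interface can pull the hash, timestamp and VLAN hints straight from the descriptor context. A rough BPF-C sketch of a consumer; the declarations are assumed to come from vmlinux.h and libbpf, and the program itself is not part of the patch:

/* SPDX-License-Identifier: GPL-2.0 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					__be16 *vlan_proto,
					__u16 *vlan_tci) __ksym;

SEC("xdp")
int rx_hints_example(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__be16 vlan_proto;
	__u16 vlan_tci;
	__u64 ts;
	__u32 hash;

	/* each helper returns 0 on success, -ENODATA if the hint is absent */
	bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type);
	bpf_xdp_metadata_rx_timestamp(ctx, &ts);
	bpf_xdp_metadata_rx_vlan_tag(ctx, &vlan_proto, &vlan_tci);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";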
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 115969ecdf7b..afcead4baef4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -12,26 +12,39 @@
* act: action to store onto Rx buffers related to XDP buffer parts
*
* Set action that should be taken before putting Rx buffer from first frag
- * to one before last. Last one is handled by caller of this function as it
- * is the EOP frag that is currently being processed. This function is
- * supposed to be called only when XDP buffer contains frags.
+ * to the last.
*/
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
const unsigned int act)
{
- const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
- u32 first = rx_ring->first_desc;
- u32 nr_frags = sinfo->nr_frags;
+ u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
struct ice_rx_buf *buf;
for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[first];
+ buf = &rx_ring->rx_buf[idx];
buf->act = act;
- if (++first == cnt)
- first = 0;
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ /* adjust pagecnt_bias on frags freed by XDP prog */
+ if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+ u32 delta = rx_ring->nr_frags - sinfo_frags;
+
+ while (delta) {
+ if (idx == 0)
+ idx = cnt - 1;
+ else
+ idx--;
+ buf = &rx_ring->rx_buf[idx];
+ buf->pagecnt_bias--;
+ delta--;
+ }
}
}
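Worked example (illustrative numbers): a frame lands in four buffers, so rx_ring->nr_frags is 3 and the loop above marks all four. If the XDP program trimmed the frame with bpf_xdp_adjust_tail() so the shared info now reports only one frag, then on ICE_XDP_CONSUMED delta is 2, and the two trailing buffers, whose frags the XDP core already released, get their pagecnt_bias decremented so the driver's page-reference accounting stays balanced.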
@@ -84,7 +97,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
- * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
+ * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
* The OS and current PF implementation only support stripping a single VLAN tag
@@ -92,7 +105,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
* one is found return the tag, else return 0 to mean no VLAN tag was found.
*/
static inline u16
-ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
+ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
{
u16 stat_err_bits;
@@ -148,7 +161,17 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 ptype);
+ struct sk_buff *skb);
void
-ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
+
+static inline void
+ice_xdp_meta_set_desc(struct xdp_buff *xdp,
+ union ice_32b_rx_flex_desc *eop_desc)
+{
+ struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
+ xdp_buff);
+
+ xdp_ext->eop_desc = eop_desc;
+}
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a18ca0ff879f..a508e917ce5f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,6 +17,7 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
+#include "ice_fwlog.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -246,6 +247,7 @@ struct ice_fd_hw_prof {
int cnt;
u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
+ u64 prof_id[ICE_FD_HW_SEG_MAX];
};
/* Common HW capabilities for SW use */
@@ -351,6 +353,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
+#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
struct ice_ts_dev_info {
/* Device specific info */
@@ -364,6 +367,7 @@ struct ice_ts_dev_info {
u8 tmr0_ena;
u8 tmr1_ena;
u8 ts_ll_read;
+ u8 ts_ll_int_read;
};
/* Function specific capabilities */
@@ -377,6 +381,8 @@ struct ice_hw_func_caps {
struct ice_ts_func_info ts_func_info;
};
+#define ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT 0
+
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
@@ -385,6 +391,11 @@ struct ice_hw_dev_caps {
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
+ /* bitmap of supported sensors
+ * bit 0 - internal temperature sensor
+ * bits 31:1 - Reserved
+ */
+ u32 supported_sensors;
};
/* MAC info */
@@ -730,24 +741,6 @@ struct ice_switch_info {
DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
-/* FW logging configuration */
-struct ice_fw_log_evnt {
- u8 cfg : 4; /* New event enables to configure */
- u8 cur : 4; /* Current/active event enables */
-};
-
-struct ice_fw_log_cfg {
- u8 cq_en : 1; /* FW logging is enabled via the control queue */
- u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */
- u8 actv_evnts; /* Cumulation of currently enabled log events */
-
-#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S)
- struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
-};
-
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
@@ -827,7 +820,7 @@ struct ice_mbx_data {
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
ICE_PHY_E810 = 1,
- ICE_PHY_E822,
+ ICE_PHY_E82X,
};
/* Port hardware description */
@@ -889,7 +882,9 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fw_log_cfg fw_log;
+ struct ice_fwlog_cfg fwlog_cfg;
+ bool fwlog_supported; /* does hardware support FW logging? */
+ struct ice_fwlog_ring fwlog_ring;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
@@ -910,10 +905,9 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_PHY_PER_NAC_E822 1
#define ICE_MAX_QUAD 2
-#define ICE_QUADS_PER_PHY_E822 2
-#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_QUADS_PER_PHY_E82X 2
+#define ICE_PORTS_PER_PHY_E82X 8
#define ICE_PORTS_PER_QUAD 4
#define ICE_PORTS_PER_PHY_E810 4
#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
@@ -1009,7 +1003,6 @@ struct ice_hw_port_stats {
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
- u64 rx_len_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
@@ -1048,6 +1041,7 @@ enum ice_sw_fwd_act_type {
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
+ ICE_MIRROR_PACKET,
ICE_NOP,
ICE_INVAL_ACT
};
@@ -1078,7 +1072,7 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_OROM_VER_BUILD_SHIFT 8
#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
#define ICE_OROM_VER_SHIFT 24
-#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
+#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index b7ae09952156..2ffdae9a82df 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -248,29 +248,44 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
}
/**
- * ice_vf_recreate_vsi - Release and re-create the VF's VSI
- * @vf: VF to recreate the VSI for
+ * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
+ * @vf: VF to reconfigure the VSI for
*
- * This is only called when a single VF is being reset (i.e. VVF, VFLR, host
- * VF configuration change, etc)
+ * This is called when a single VF is being reset (e.g. VFR, VFLR, host VF
+ * configuration change, etc).
*
- * It releases and then re-creates a new VSI.
+ * It brings the VSI down and then reconfigures it with the hardware.
*/
-static int ice_vf_recreate_vsi(struct ice_vf *vf)
+int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
int err;
- ice_vf_vsi_release(vf);
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_NO_INIT;
+
+ ice_vsi_decfg(vsi);
+ ice_fltr_remove_all(vsi);
- err = vf->vf_ops->create_vsi(vf);
+ err = ice_vsi_cfg(vsi, &params);
if (err) {
dev_err(ice_pf_to_dev(pf),
- "Failed to recreate the VF%u's VSI, error %d\n",
+ "Failed to reconfigure the VF%u's VSI, error %d\n",
vf->vf_id, err);
return err;
}
+ /* Update the lan_vsi_num field since it might have been changed. The
+ * PF lan_vsi_idx number remains the same so we don't need to change
+ * that.
+ */
+ vf->lan_vsi_num = vsi->vsi_num;
+
return 0;
}
@@ -760,6 +775,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
+ ice_eswitch_detach(pf, vf);
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -775,13 +791,11 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
+ ice_eswitch_attach(pf, vf);
+
mutex_unlock(&vf->cfg_lock);
}
- if (ice_is_eswitch_mode_switchdev(pf))
- if (ice_eswitch_rebuild(pf))
- dev_warn(dev, "eswitch rebuild failed\n");
-
ice_flush(hw);
clear_bit(ICE_VF_DIS, pf->state);
@@ -929,7 +943,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_vf_pre_vsi_rebuild(vf);
- if (ice_vf_recreate_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf)) {
dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
vf->vf_id);
err = -EFAULT;
@@ -943,7 +957,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
goto out_unlock;
}
- ice_eswitch_update_repr(vsi);
+ ice_eswitch_update_repr(vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 93c774f2f437..0cc9034065c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -62,7 +62,6 @@ struct ice_vf_ops {
bool (*poll_reset_status)(struct ice_vf *vf);
void (*clear_reset_trigger)(struct ice_vf *vf);
void (*irq_close)(struct ice_vf *vf);
- int (*create_vsi)(struct ice_vf *vf);
void (*post_vsi_rebuild)(struct ice_vf *vf);
};
@@ -130,7 +129,7 @@ struct ice_vf {
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
- struct ice_repr *repr;
+ unsigned long repr_id;
const struct ice_virtchnl_ops *virtchnl_ops;
const struct ice_vf_ops *vf_ops;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..91ba7fe0eaee 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -23,6 +23,7 @@
#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
#endif
+int ice_vf_reconfig_vsi(struct ice_vf *vf);
void ice_initialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1c7b4ded948b..6f2328a049bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vf->driver_caps = *(u32 *)msg;
else
vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
vf->driver_caps);
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
- } else {
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
- else
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
- }
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
@@ -689,9 +682,7 @@ out:
* a specific virtchnl RSS cfg
* @hw: pointer to the hardware
* @rss_cfg: pointer to the virtchnl RSS cfg
- * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
- * to configure
- * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @hash_cfg: pointer to the HW hash configuration
*
* Return true if all the protocol header and hash fields in the RSS cfg could
* be parsed, else return false
@@ -699,13 +690,23 @@ out:
* This function parses the virtchnl RSS cfg to be the intended
* hash fields and the intended header for RSS configuration
*/
-static bool
-ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
- u32 *addl_hdrs, u64 *hash_flds)
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
{
const struct ice_vc_hash_field_match_type *hf_list;
const struct ice_vc_hdr_match_type *hdr_list;
int i, hf_list_len, hdr_list_len;
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
hf_list = ice_vc_hash_field_list;
hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
@@ -823,8 +824,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
int status;
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -832,11 +833,9 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
goto error_param;
}
- ctx->info.q_opt_rss = ((lut_type <<
- ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ ctx->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
/* Preserve existing queueing option setting */
ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
@@ -858,18 +857,24 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
kfree(ctx);
} else {
- u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- u64 hash_flds = ICE_HASH_INVALID;
+ struct ice_rss_hash_cfg cfg;
+
+ /* Only validate the pattern for the non-raw case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
- &hash_flds)) {
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add) {
- if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs)) {
+ if (ice_add_rss_cfg(hw, vsi, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
vsi->vsi_num, v_ret);
@@ -877,8 +882,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
} else {
int status;
- status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs);
+ status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
/* We just ignore -ENOENT, because if two configurations
* share the same profile remove one of them actually
* removes both, since the profile is deleted.
@@ -989,6 +993,51 @@ error_param:
}
/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
+ */
+static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
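For reference, the VF side triggers this handler by sending a VIRTCHNL_OP_CONFIG_RSS_HFUNC message carrying a struct virtchnl_rss_hfunc. A hypothetical sketch of that request; example_send_to_pf() stands in for the VF driver's real virtchnl transmit path and is not an actual API:

/* hypothetical transport helper, declared only for illustration */
int example_send_to_pf(enum virtchnl_ops op, void *msg, u16 len);

static int example_request_symmetric_rss(u16 vsi_id)
{
	struct virtchnl_rss_hfunc vrh = {};

	vrh.vsi_id = vsi_id;
	vrh.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;

	return example_send_to_pf(VIRTCHNL_OP_CONFIG_RSS_HFUNC,
				  &vrh, sizeof(vrh));
}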
+
+/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2634,7 +2683,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
}
if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
v_ret = ice_err_to_virt_err(status);
}
@@ -3046,7 +3095,7 @@ static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
{
struct ice_vlan vlan = { 0 };
- vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci);
vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
vlan.tpid = vc_vlan->tpid;
@@ -3755,6 +3804,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
+ .config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
.add_vlan_msg = ice_vc_add_vlan_msg,
@@ -3884,6 +3934,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
+ .config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
.add_vlan_msg = ice_vc_add_vlan_msg,
@@ -4066,6 +4117,9 @@ error_handler:
case VIRTCHNL_OP_CONFIG_RSS_LUT:
err = ops->config_rss_lut(vf, msg);
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ err = ops->config_rss_hfunc(vf, msg);
+ break;
case VIRTCHNL_OP_GET_STATS:
err = ops->get_stats_msg(vf, msg);
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index cd747718de73..60dfbe05980a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -32,6 +32,7 @@ struct ice_virtchnl_ops {
int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_hfunc)(struct ice_vf *vf, u8 *msg);
int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 7d547fa616fa..d796dbd2a440 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -13,8 +13,6 @@
* - opcodes needed by VF when caps are activated
*
* Caps that don't use new opcodes (no opcodes should be allowed):
- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
* - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
* - VIRTCHNL_VF_OFFLOAD_CRC
* - VIRTCHNL_VF_OFFLOAD_RX_POLLING
@@ -68,6 +66,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 24b23b7ef04a..f001553e1a1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -10,19 +10,6 @@
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
-#define ICE_FLOW_PROF_TYPE_S 0
-#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
-#define ICE_FLOW_PROF_VSI_S 32
-#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
-
-/* Flow profile ID format:
- * [0:31] - flow type, flow + tun_offs
- * [32:63] - VSI index
- */
-#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
- ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
- (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
-
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
@@ -493,6 +480,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
return;
vf_prof = fdir->fdir_prof[flow];
+ prof_id = vf_prof->prof_id[tun];
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
@@ -503,9 +491,6 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
if (!fdir->prof_entry_cnt[flow][tun])
return;
- prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
- flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
-
for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
if (vf_prof->entry_h[i][tun]) {
u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
@@ -647,7 +632,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_hw *hw;
u64 entry1_h = 0;
u64 entry2_h = 0;
- u64 prof_id;
int ret;
pf = vf->pf;
@@ -681,18 +665,15 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
ice_vc_fdir_rem_prof(vf, flow, tun);
}
- prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
- tun ? ICE_FLTR_PTYPE_MAX : 0);
-
- ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- tun + 1, &prof);
+ ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
+ tun + 1, false, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
- ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (ret) {
@@ -701,7 +682,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
goto err_prof;
}
- ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (ret) {
@@ -725,14 +706,16 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
vf_prof->cnt++;
fdir->prof_entry_cnt[flow][tun]++;
+ vf_prof->prof_id[tun] = prof->id;
+
return 0;
err_entry_1:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
- ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
+ ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
- ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
return ret;
}
@@ -1480,16 +1463,15 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
int ret;
stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
- if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
- ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
+ if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
+ ICE_FXD_FLTR_WB_QW1_DD_YES) {
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
ret = -EINVAL;
goto err_exit;
}
- prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
- ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
+ prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
dev_err(dev, "VF %d: Desc show add, but ctx not",
@@ -1508,8 +1490,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
goto err_exit;
}
- error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
- ICE_FXD_FLTR_WB_QW1_FAIL_S;
+ error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
@@ -1524,8 +1505,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
goto err_exit;
}
- error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
- ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
+ error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 76266e709a39..2e9ad27cb9d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -131,6 +131,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
+ u8 *ivf;
int err;
/* do not allow modifying VLAN stripping when a port VLAN is configured
@@ -143,19 +144,24 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
if (!ctxt)
return -ENOMEM;
+ ivf = &ctxt->info.inner_vlan_flags;
+
/* Here we are configuring what the VSI should do with the VLAN tag in
* the Rx packet. We can either leave the tag in the packet or put it in
* the Rx descriptor.
*/
- if (ena)
+ if (ena) {
/* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
- else
+ *ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+ ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH);
+ } else {
/* Disable stripping. Leave tag in packet */
- ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+ *ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
+ }
/* Allow all packets untagged/tagged */
- ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+ *ivf |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
@@ -481,10 +487,11 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
ctxt->info.outer_vlan_flags |=
- ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
- ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M));
+ /* We want EMODE_SHOW_BOTH, but that value is zero and the line
+ * above has already cleared the EMODE field, so only the tag
+ * type needs to be set here.
+ */
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -589,11 +596,9 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M |
ICE_AQ_VSI_OUTER_TAG_TYPE_M);
ctxt->info.outer_vlan_flags |=
- ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL) |
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -642,9 +647,8 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
ctxt->info.outer_vlan_flags |=
ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
- ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -702,8 +706,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
ctxt->info.outer_vlan_flags =
(ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW <<
ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M) |
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type) |
ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED <<
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) |
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 99954508184f..2eecd0f39aa6 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
return -EBUSY;
usleep_range(1000, 2000);
}
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
@@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
@@ -259,11 +260,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
- clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return 0;
}
@@ -458,6 +459,11 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
+ /* Put private info that changes on a per-packet basis
+ * into xdp_buff_xsk->cb.
+ */
+ ice_xdp_meta_set_desc(*xdp, rx_desc);
+
rx_desc++;
xdp++;
}
@@ -820,7 +826,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start), 0, size);
+ virt_to_page(xdp->data_hard_start),
+ XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@@ -863,8 +870,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
rx_desc = ICE_RX_DESC(rx_ring, ntc);
@@ -891,7 +897,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (!first) {
first = xdp;
- xdp_buff_clear_frags_flag(first);
} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
break;
}
@@ -942,13 +947,10 @@ construct_skb:
total_rx_bytes += skb->len;
total_rx_packets++;
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
-
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
+ vlan_tci = ice_get_vlan_tci(rx_desc);
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- ice_receive_skb(rx_ring, skb, vlan_tag);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
+ ice_receive_skb(rx_ring, skb, vlan_tci);
}
rx_ring->next_to_clean = ntc;
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index bee73353b56a..0acc125decb3 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -15,7 +15,7 @@ struct idpf_vport_max_q;
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
-#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <net/gro.h>
#include <linux/dim.h>
@@ -418,11 +418,13 @@ struct idpf_vport {
/**
* enum idpf_user_flags
+ * @__IDPF_USER_FLAG_HSPLIT: header split state
* @__IDPF_PROMISC_UC: Unicast promiscuous mode
* @__IDPF_PROMISC_MC: Multicast promiscuous mode
* @__IDPF_USER_FLAGS_NBITS: Must be last
*/
enum idpf_user_flags {
+ __IDPF_USER_FLAG_HSPLIT = 0U,
__IDPF_PROMISC_UC = 32,
__IDPF_PROMISC_MC,
@@ -965,4 +967,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
+u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
+bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
+
#endif /* !_IDPF_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 52ea38669f85..986d429d1175 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -75,14 +75,12 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
/**
* idpf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
*/
-static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int idpf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
@@ -103,15 +101,14 @@ static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (key)
- memcpy(key, rss_data->rss_key, rss_data->rss_key_size);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < rss_data->rss_lut_size; i++)
- indir[i] = rss_data->rss_lut[i];
+ rxfh->indir[i] = rss_data->rss_lut[i];
}
unlock_mutex:
@@ -123,15 +120,15 @@ unlock_mutex:
/**
* idpf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
*/
-static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int idpf_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
@@ -154,17 +151,18 @@ static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
err = -EOPNOTSUPP;
goto unlock_mutex;
}
- if (key)
- memcpy(rss_data->rss_key, key, rss_data->rss_key_size);
+ if (rxfh->key)
+ memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
for (lut = 0; lut < rss_data->rss_lut_size; lut++)
- rss_data->rss_lut[lut] = indir[lut];
+ rss_data->rss_lut[lut] = rxfh->indir[lut];
}
err = idpf_config_rss(vport);
@@ -320,6 +318,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
ring->rx_pending = vport->rxq_desc_count;
ring->tx_pending = vport->txq_desc_count;
+ kring->tcp_data_split = idpf_vport_get_hsplit(vport);
+
idpf_vport_ctrl_unlock(netdev);
}
@@ -379,6 +379,14 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_rx_count == vport->rxq_desc_count)
goto unlock_mutex;
+ if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
+ NL_SET_ERR_MSG_MOD(ext_ack,
+ "setting TCP data split is not supported");
+ err = -EOPNOTSUPP;
+
+ goto unlock_mutex;
+ }
+
config_data = &vport->adapter->vport_config[idx]->user_config;
config_data->num_req_txq_desc = new_tx_count;
config_data->num_req_rxq_desc = new_rx_count;
@@ -532,7 +540,7 @@ static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
unsigned int i;
for (i = 0; i < size; i++)
- ethtool_sprintf(p, "%s", stats[i].stat_string);
+ ethtool_puts(p, stats[i].stat_string);
}
/**
@@ -1334,6 +1342,7 @@ static int idpf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_msglevel = idpf_get_msglevel,
.set_msglevel = idpf_set_msglevel,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 19809b0ddcd9..58179bd733ff 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -783,6 +783,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
+ netdev->dev_port = idx;
+
/* configure default MTU size */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = vport->max_mtu;
@@ -1058,6 +1060,71 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
}
/**
+ * idpf_is_hsplit_supported - check whether the header split is supported
+ * @vport: virtual port to check the capability for
+ *
+ * Return: true if it's supported by the HW/FW, false if not.
+ */
+static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
+{
+ return idpf_is_queue_model_split(vport->rxq_model) &&
+ idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
+ IDPF_CAP_HSPLIT);
+}
+
+/**
+ * idpf_vport_get_hsplit - get the current header split feature state
+ * @vport: virtual port to query the state for
+ *
+ * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
+ * ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
+ * ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
+ */
+u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
+{
+ const struct idpf_vport_user_config_data *config;
+
+ if (!idpf_is_hsplit_supported(vport))
+ return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+
+ config = &vport->adapter->vport_config[vport->idx]->user_config;
+
+ return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+/**
+ * idpf_vport_set_hsplit - enable or disable header split on a given vport
+ * @vport: virtual port to configure
+ * @val: Ethtool flag controlling the header split state
+ *
+ * Return: true on success, false if not supported by the HW.
+ */
+bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
+{
+ struct idpf_vport_user_config_data *config;
+
+ if (!idpf_is_hsplit_supported(vport))
+ return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+
+ config = &vport->adapter->vport_config[vport->idx]->user_config;
+
+ switch (val) {
+ case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
+ /* Default is to enable */
+ case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
+ __set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
+ return true;
+ case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
+ __clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
+ return true;
+ default:
+ return false;
+ }
+}
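Both helpers are wired into the ethtool ring-parameter path elsewhere in this patch (idpf_get_ringparam()/idpf_set_ringparam()), which is what 'ethtool -G <iface> tcp-data-split on|off' ends up exercising. A condensed sketch of that round trip; the example_* wrappers are illustrative, and the real handlers carry additional locking and validation:

static void example_get_ring(struct idpf_vport *vport,
			     struct kernel_ethtool_ringparam *kring)
{
	kring->tcp_data_split = idpf_vport_get_hsplit(vport);
}

static int example_set_ring(struct idpf_vport *vport,
			    const struct kernel_ethtool_ringparam *kring,
			    struct netlink_ext_ack *extack)
{
	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "setting TCP data split is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}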
+
+/**
* idpf_vport_alloc - Allocates the next available struct vport in the adapter
* @adapter: board private structure
* @max_q: vport max queue info
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 20c4b3a64710..27b93592c4ba 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -328,10 +328,9 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
if (offload->tso_segs) {
qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S;
- qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) &
- IDPF_TXD_CTX_QW1_TSO_LEN_M;
- qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) &
- IDPF_TXD_CTX_QW1_MSS_M;
+ qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_TSO_LEN_M,
+ offload->tso_len);
+ qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);
u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.tx.lso_pkts);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 9e942e5baf39..2f8ad79ae3f0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -505,9 +505,9 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) |
- (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) <<
- IDPF_RX_BI_GEN_S);
+ FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RX_BI_GEN_M,
+ test_bit(__IDPF_Q_GEN_CHK, refillq->flags));
if (unlikely(++nta == refillq->desc_count)) {
nta = 0;
@@ -1240,12 +1240,15 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_adapter *adapter = vport->adapter;
struct idpf_queue *q;
int i, k, err = 0;
+ bool hs;
vport->rxq_grps = kcalloc(vport->num_rxq_grp,
sizeof(struct idpf_rxq_group), GFP_KERNEL);
if (!vport->rxq_grps)
return -ENOMEM;
+ hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
int j;
@@ -1298,9 +1301,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
q->rx_buf_size = vport->bufq_size[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
- if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
- IDPF_CAP_HSPLIT) &&
- idpf_is_queue_model_split(vport->rxq_model)) {
+
+ if (hs) {
q->rx_hsplit_en = true;
q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
}
@@ -1344,9 +1346,7 @@ skip_splitq_rx_init:
rx_qgrp->splitq.rxq_sets[j]->refillq1 =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
- if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
- IDPF_CAP_HSPLIT) &&
- idpf_is_queue_model_split(vport->rxq_model)) {
+ if (hs) {
q->rx_hsplit_en = true;
q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
}
@@ -1825,14 +1825,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
u16 gen;
/* if the descriptor isn't done, no work yet to do */
- gen = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+ gen = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_GEN_M);
if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
- rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+ rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
dev_err(&complq->vport->adapter->pdev->dev,
@@ -1842,9 +1842,8 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
tx_q = complq->txq_grp->txqs[rel_tx_qid];
/* Determine completion type */
- ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_COMPL_TYPE_M) >>
- IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+ ctype = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_COMPL_TYPE_M);
switch (ctype) {
case IDPF_TXD_COMPLT_RE:
hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
@@ -1945,11 +1944,10 @@ void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
u16 td_cmd, u16 size)
{
desc->q.qw1.cmd_dtype =
- cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M);
+ le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
desc->q.qw1.cmd_dtype |=
- cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) &
- IDPF_FLEX_TXD_QW1_CMD_M);
- desc->q.qw1.buf_size = cpu_to_le16((u16)size);
+ le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
+ desc->q.qw1.buf_size = cpu_to_le16(size);
desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
}
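The conversions in the surrounding hunks move from le16_to_cpu() plus an explicit mask-and-shift to le16_get_bits()/le16_encode_bits(), which fold the byte swap and the field access into one call. An illustrative helper showing the equivalence for the completion-queue GEN bit, using the pre-existing mask and shift macros; the WARN_ON_ONCE() is only there to document that both forms agree:

static u16 example_complq_gen(__le16 qid_comptype_gen)
{
	u16 open_coded = (le16_to_cpu(qid_comptype_gen) &
			  IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
	u16 helper = le16_get_bits(qid_comptype_gen, IDPF_TXD_COMPLQ_GEN_M);

	WARN_ON_ONCE(open_coded != helper);
	return helper;
}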
@@ -2843,8 +2841,9 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n
qword1);
csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
- csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M,
- le16_to_cpu(rx_desc->ptype_err_fflags0));
+ csum->raw_csum_inv =
+ le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
}
@@ -2938,8 +2937,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
struct idpf_rx_ptype_decoded decoded;
u16 rx_ptype;
- rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M,
- le16_to_cpu(rx_desc->ptype_err_fflags0));
+ rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
/* If we don't know the ptype we can't do anything else with it. Just
@@ -2953,8 +2952,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
- if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M,
- le16_to_cpu(rx_desc->hdrlen_flags)))
+ if (le16_get_bits(rx_desc->hdrlen_flags,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
@@ -3148,8 +3147,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
dma_rmb();
/* if the descriptor isn't done, no work yet to do */
- gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id);
+ gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
break;
@@ -3164,9 +3163,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
continue;
}
- pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
- pkt_len);
+ pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
rx_desc->status_err0_qw1);
@@ -3183,14 +3181,12 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
goto bypass_hsplit;
}
- hdr_len = le16_to_cpu(rx_desc->hdrlen_flags);
- hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M,
- hdr_len);
+ hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);
bypass_hsplit:
- bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M,
- bufq_id);
+ bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
if (!bufq_id)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 2c1b051fdc0d..390977a76de2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2087,8 +2087,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
/* schedule the napi to receive all the marker packets */
+ local_bh_disable();
for (i = 0; i < vport->num_q_vectors; i++)
napi_schedule(&vport->q_vectors[i].napi);
+ local_bh_enable();
return idpf_wait_for_marker_event(vport);
}
@@ -3285,6 +3287,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
+ idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
+
idpf_vport_init_num_qs(vport, vport_msg);
idpf_vport_calc_num_q_desc(vport);
idpf_vport_calc_num_q_groups(vport);
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 8dc837889723..4a3c4454d25a 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -978,7 +978,7 @@ struct virtchnl2_ptype {
u8 proto_id_count;
__le16 pad;
__le16 proto_id[];
-};
+} __packed __aligned(2);
VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
/**
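Marking virtchnl2_ptype as __packed __aligned(2) pins the on-wire size that VIRTCHNL2_CHECK_STRUCT_LEN(6, ...) asserts: on most ABIs the struct is already 6 bytes, but some ABIs round struct sizes up to a larger multiple, and the explicit 2-byte alignment keeps the __le16 members naturally aligned despite the packing. A userspace sketch of the idea, with field types simplified:

#include <stdint.h>

struct ptype_wire {
	uint16_t ptype_id_10;
	uint8_t  ptype_id_8;
	uint8_t  proto_id_count;
	uint16_t pad;
	uint16_t proto_id[];
} __attribute__((packed, aligned(2)));

_Static_assert(sizeof(struct ptype_wire) == 6, "wire layout must stay 6 bytes");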
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 8d6e44ee1895..64dfc362d1dc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -222,8 +222,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
}
/* set lan id */
- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
- E1000_STATUS_FUNC_SHIFT;
+ hw->bus.func = FIELD_GET(E1000_STATUS_FUNC_MASK, rd32(E1000_STATUS));
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
@@ -262,8 +261,8 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
if (ret_val)
goto out;
- data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
- E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ data = FIELD_GET(E1000_M88E1112_MAC_CTRL_1_MODE_MASK,
+ data);
if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
data == E1000_M88E1112_AUTO_COPPER_BASEX)
hw->mac.ops.check_for_link =
@@ -330,8 +329,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
u32 eecd = rd32(E1000_EECD);
u16 size;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -2798,7 +2796,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg)
!= NVM_ETS_TYPE_EMC)
return E1000_NOT_IMPLEMENTED;
@@ -2808,10 +2806,8 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
for (i = 1; i < num_sensors; i++) {
hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
- NVM_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
- NVM_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor);
+ sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor);
if (sensor_location != 0)
hw->phy.ops.read_i2c_byte(hw,
@@ -2859,20 +2855,17 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg)
!= NVM_ETS_TYPE_EMC)
return E1000_NOT_IMPLEMENTED;
- low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
- NVM_ETS_LTHRES_DELTA_SHIFT);
+ low_thresh_delta = FIELD_GET(NVM_ETS_LTHRES_DELTA_MASK, ets_cfg);
num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
for (i = 1; i <= num_sensors; i++) {
hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
- NVM_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
- NVM_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor);
+ sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor);
therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
hw->phy.ops.write_i2c_byte(hw,
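The e1000_82575.c hunks above are part of a mechanical conversion from open-coded mask-and-shift to FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive the shift from the mask itself so the companion *_SHIFT macros become unnecessary. A userspace sketch of the contract, using an illustrative mask rather than the real register definitions:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FUNC_MASK 0x0000000Cu	/* illustrative 2-bit field at bits 3:2 */

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);	/* shift is derived from the mask */
}

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t status = 0x00000008;

	printf("func = %u\n", field_get(EXAMPLE_FUNC_MASK, status));	/* prints 2 */
	printf("reg  = 0x%x\n", field_prep(EXAMPLE_FUNC_MASK, 1));	/* prints 0x4 */
	return 0;
}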
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index b9b9d35494d2..503b239868e8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -5,9 +5,9 @@
* e1000_i211
*/
-#include <linux/types.h>
+#include <linux/bitfield.h>
#include <linux/if_ether.h>
-
+#include <linux/types.h>
#include "e1000_hw.h"
#include "e1000_i210.h"
@@ -473,7 +473,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record);
status = 0;
break;
}
@@ -483,8 +483,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
(i != 1))) {
- version = (*next_record & E1000_INVM_VER_FIELD_TWO)
- >> 13;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_TWO,
+ *next_record);
status = 0;
break;
}
@@ -493,15 +493,15 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
*/
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record);
status = 0;
break;
}
}
if (!status) {
- invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
- >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_major = FIELD_GET(E1000_INVM_MAJOR_MASK,
+ version);
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
}
/* Read Image Type */
@@ -520,7 +520,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
((((*record & 0x3) != 0) && (i != 1)))) {
invm_ver->invm_img_type =
- (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ FIELD_GET(E1000_INVM_IMGTYPE_FIELD,
+ *next_record);
status = 0;
break;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index caf91c6f52b4..fa3dfafd2bb1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -50,13 +51,12 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
break;
}
- bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT);
+ bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW,
+ pcie_link_status);
}
reg = rd32(E1000_STATUS);
- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+ bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index fa136e6e9328..2dcd64d6dec3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */
-#include <linux/if_ether.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
-
+#include <linux/if_ether.h>
#include "e1000_mac.h"
#include "e1000_nvm.h"
@@ -708,10 +708,10 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
*/
if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
- >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
- >> NVM_MINOR_SHIFT;
+ fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK,
+ fw_version);
+ fw_vers->eep_minor = FIELD_GET(NVM_MINOR_MASK,
+ fw_version);
fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
goto etrack_id;
}
@@ -753,15 +753,13 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
return;
}
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
- >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK, fw_version);
/* check for old style version format in newer images*/
if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
} else {
- eeprom_verl = (fw_version & NVM_MINOR_MASK)
- >> NVM_MINOR_SHIFT;
+ eeprom_verl = FIELD_GET(NVM_MINOR_MASK, fw_version);
}
/* Convert minor value to hex before assigning to output struct
* Val to be converted will not be higher than 99, per tool output
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index a018000f7db9..cd65008c7ef5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */
-#include <linux/if_ether.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
-
+#include <linux/if_ether.h>
#include "e1000_mac.h"
#include "e1000_phy.h"
@@ -255,7 +255,7 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
}
/* Need to byte-swap the 16-bit value. */
- *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+ *data = ((i2ccmd >> 8) & 0x00FF) | FIELD_PREP(0xFF00, i2ccmd);
return 0;
}
@@ -282,7 +282,7 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
}
/* Swap the data bytes for the I2C interface */
- phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+ phy_data_swapped = ((data >> 8) & 0x00FF) | FIELD_PREP(0xFF00, data);
/* Set up Op-code, Phy Address, and register address in the I2CCMD
* register. The MAC will take care of interfacing with the
@@ -1682,8 +1682,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
goto out;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
@@ -1796,8 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
if (ret_val)
goto out;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
@@ -2578,8 +2576,7 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
if (ret_val)
goto out;
- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
- I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+ length = FIELD_GET(I82580_DSTATUS_CABLE_LENGTH, phy_data);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
ret_val = -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index a2b759531cb7..3c2dc7bdebb5 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -637,7 +637,7 @@ struct igb_adapter {
struct timespec64 period;
} perout[IGB_N_PEROUT];
- char fw_version[32];
+ char fw_version[48];
#ifdef CONFIG_IGB_HWMON
struct hwmon_buff *igb_hwmon_buff;
bool ets;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 16d2a55d5e17..b66199c9bb3a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2356,11 +2356,9 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
break;
case ETH_SS_STATS:
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igb_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, igb_gstrings_stats[i].stat_string);
for (i = 0; i < IGB_NETDEV_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igb_gstrings_net_stats[i].stat_string);
+ ethtool_puts(&p, igb_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -2434,7 +2432,7 @@ static int igb_get_ts_info(struct net_device *dev,
}
}
-#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(FIELD_MAX(U16_MAX))
static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@@ -2713,8 +2711,7 @@ static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter,
etqf |= (etype & E1000_ETQF_ETYPE_MASK);
etqf &= ~E1000_ETQF_QUEUE_MASK;
- etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT)
- & E1000_ETQF_QUEUE_MASK);
+ etqf |= FIELD_PREP(E1000_ETQF_QUEUE_MASK, input->action);
etqf |= E1000_ETQF_QUEUE_ENABLE;
wr32(E1000_ETQF(i), etqf);
@@ -2733,8 +2730,8 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
u32 vlapqf;
vlapqf = rd32(E1000_VLAPQF);
- vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
- >> VLAN_PRIO_SHIFT;
+ vlan_priority = FIELD_GET(VLAN_PRIO_MASK,
+ ntohs(input->filter.vlan_tci));
queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK;
/* check whether this vlan prio is already set */
@@ -2817,7 +2814,7 @@ static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter,
u8 vlan_priority;
u32 vlapqf;
- vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan_priority = FIELD_GET(VLAN_PRIO_MASK, vlan_tci);
vlapqf = rd32(E1000_VLAPQF);
vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority);
@@ -3282,18 +3279,17 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int igb_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < IGB_RETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
@@ -3333,8 +3329,9 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
}
-static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int igb_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3342,10 +3339,11 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
u32 num_queues;
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
num_queues = adapter->rss_queues;
@@ -3362,12 +3360,12 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
/* Verify user input. */
for (i = 0; i < IGB_RETA_SIZE; i++)
- if (indir[i] >= num_queues)
+ if (rxfh->indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGB_RETA_SIZE; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
igb_write_rss_indir_tbl(adapter);
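The igb_ethtool.c hunks above follow the ethtool core change that passes RSS parameters through one struct ethtool_rxfh_param instead of separate indir/key/hfunc arguments (set_rxfh additionally receives a netlink extack). A sketch of the new op shape in kernel context; example_fill_indir() is a hypothetical helper, not an igb function:

static int example_get_rxfh(struct net_device *dev,
			    struct ethtool_rxfh_param *rxfh)
{
	rxfh->hfunc = ETH_RSS_HASH_TOP;			/* report the hash function in use */
	if (rxfh->indir)
		example_fill_indir(dev, rxfh->indir);	/* copy out the redirection table */
	return 0;
}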
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b2295caa2f0a..cebb44f51d5f 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_fw_version fw;
- char *lbuf;
igb_get_fw_version(hw, &fw);
@@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter)
case e1000_i210:
case e1000_i211:
if (!(igb_get_flash_presence_i210(hw))) {
- lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor,
- fw.invm_img_type);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
break;
}
fallthrough;
default:
/* if option rom is valid, display its version too */
if (fw.or_valid) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id, fw.or_major, fw.or_build,
- fw.or_patch);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
} else if (fw.etrack_id != 0X0000) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
} else {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major,
- fw.eep_minor, fw.eep_build);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
-
- /* the truncate happens here if it doesn't fit */
- strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version));
- kfree(lbuf);
}
/**
@@ -7295,7 +7296,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int n = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
int i;
@@ -7555,7 +7556,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
- int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int add = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
int ret;
@@ -9810,8 +9811,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
- E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= FIELD_PREP(E1000_RTTBCNRC_RF_INT_MASK, rf_int);
bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
} else {
bcnrc_val = 0;
@@ -10000,8 +10000,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
hwm = 64 * (pba - 6);
reg = rd32(E1000_FCRTC);
reg &= ~E1000_FCRTC_RTH_COAL_MASK;
- reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
- & E1000_FCRTC_RTH_COAL_MASK);
+ reg |= FIELD_PREP(E1000_FCRTC_RTH_COAL_MASK, hwm);
wr32(E1000_FCRTC, reg);
/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
@@ -10010,8 +10009,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
- reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
- & E1000_DMACR_DMACTHR_MASK);
+ reg |= FIELD_PREP(E1000_DMACR_DMACTHR_MASK, dmac_thr);
/* transition to L0x or L1 if available..*/
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
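Enlarging igb_adapter::fw_version to 48 bytes lets igb_set_fw_version() format the string in place with snprintf(), dropping the kasprintf()/strscpy()/kfree() round trip and its allocation-failure case; snprintf() still truncates safely if a future string ever outgrows the buffer. A userspace sketch with made-up version numbers:

#include <stdio.h>

int main(void)
{
	char fw_version[48];

	snprintf(fw_version, sizeof(fw_version), "%d.%d, 0x%08x, %d.%d.%d",
		 1, 63, 0x800004d2, 1, 3, 0);	/* illustrative values only */
	puts(fw_version);	/* "1.63, 0x800004d2, 1.3.0" */
	return 0;
}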
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 319c544b9f04..f94570556120 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
/* adjust timestamp for the TX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_TX_LATENCY_10;
@@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
@@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_RX_LATENCY_10;
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index a3cd7ac48d4b..d15282ee5ea8 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include "mbx.h"
/**
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index fd712585af27..a4d4f00e6a87 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -3,25 +3,25 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/tcp.h>
-#include <linux/ipv6.h>
-#include <linux/slab.h>
-#include <net/checksum.h>
-#include <net/ip6_checksum.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/ipv6.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
-
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
#include "igbvf.h"
char igbvf_driver_name[] = "igbvf";
@@ -273,9 +273,8 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
* that case, it fills the header buffer and spills the rest
* into the page.
*/
- hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
- & E1000_RXDADV_HDRBUFLEN_MASK) >>
- E1000_RXDADV_HDRBUFLEN_SHIFT;
+ hlen = le16_get_bits(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info,
+ E1000_RXDADV_HDRBUFLEN_MASK);
if (hlen > adapter->rx_ps_hdr_size)
hlen = adapter->rx_ps_hdr_size;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 85cc16396506..45430e246e9c 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -81,6 +81,21 @@ struct igc_tx_timestamp_request {
u32 flags; /* flags that should be added to the tx_buffer */
};
+struct igc_inline_rx_tstamps {
+ /* Timestamps are saved in little endian at the beginning of the packet
+ * buffer following the layout:
+ *
+ * DWORD: | 0 | 1 | 2 | 3 |
+ * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
+ *
+ * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
+ * part of the timestamp.
+ *
+ */
+ __le32 timer1[2];
+ __le32 timer0[2];
+};
+
struct igc_ring_container {
struct igc_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
@@ -261,6 +276,8 @@ struct igc_adapter {
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
+ /* Free-running timer lock */
+ spinlock_t free_timer_lock;
struct cyclecounter cc;
struct timecounter tc;
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
@@ -469,6 +486,8 @@ enum igc_tx_flags {
IGC_TX_FLAGS_TSTAMP_1 = 0x100,
IGC_TX_FLAGS_TSTAMP_2 = 0x200,
IGC_TX_FLAGS_TSTAMP_3 = 0x400,
+
+ IGC_TX_FLAGS_TSTAMP_TIMER_1 = 0x800,
};
enum igc_boards {
@@ -531,7 +550,7 @@ struct igc_rx_buffer {
struct igc_xdp_buff {
struct xdp_buff xdp;
union igc_adv_rx_desc *rx_desc;
- ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
+ struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
};
struct igc_q_vector {
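The igc.h changes above stop converting the inline Rx timestamp eagerly: igc_xdp_buff::rx_ts now points at the raw words in the packet buffer (struct igc_inline_rx_tstamps), and conversion happens later via igc_ptp_rx_pktstamp() for whichever timer is wanted. A userspace sketch of parsing that layout, little-endian host assumed and input bytes made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct inline_rx_tstamps {
	uint32_t timer1[2];	/* [0] = SYSTIML (nanoseconds), [1] = SYSTIMH (seconds) */
	uint32_t timer0[2];
};

int main(void)
{
	uint8_t pktbuf[16] = { 0 };
	struct inline_rx_tstamps ts;

	pktbuf[8] = 0x40;	/* timer0 SYSTIML = 64 ns */
	pktbuf[12] = 0x02;	/* timer0 SYSTIMH = 2 s */
	memcpy(&ts, pktbuf, sizeof(ts));
	printf("timer0: %u s + %u ns\n", ts.timer0[1], ts.timer0[0]);
	return 0;
}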
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index a1d815af507d..9fae8bdec2a7 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -68,8 +68,7 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
u32 eecd = rd32(IGC_EECD);
u16 size;
- size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
- IGC_EECD_SIZE_EX_SHIFT);
+ size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -162,8 +161,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
phy->reset_delay_us = 100;
/* set lan id */
- hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
- IGC_STATUS_FUNC_SHIFT;
+ hw->bus.func = FIELD_GET(IGC_STATUS_FUNC_MASK, rd32(IGC_STATUS));
/* Make sure the PHY is in a good state. Several people have reported
* firmware leaving the PHY's page select register set to something
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index f7d6491d4c60..bf8cdfbba9ff 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -37,6 +37,10 @@ struct igc_adv_tx_context_desc {
#define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */
#define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */
#define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_1 0x00010000 /* Select timer 1 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_2 0x00020000 /* Select timer 2 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_3 0x00030000 /* Select timer 3 for timestamp */
+
#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index b3037016f31d..5f92b3c7c3d4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -317,6 +317,8 @@
#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+#define IGC_TXD_PTP2_TIMER_1 0x00000020
+
/* IPSec Encrypt Enable */
#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 859b2636f3d9..b95d2c86e803 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -773,11 +773,9 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
break;
case ETH_SS_STATS:
for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igc_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, igc_gstrings_stats[i].stat_string);
for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igc_gstrings_net_stats[i].stat_string);
+ ethtool_puts(&p, igc_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -1464,45 +1462,46 @@ static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev)
return IGC_RETA_SIZE;
}
-static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int igc_ethtool_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < IGC_RETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
-static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int igc_ethtool_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u32 num_queues;
int i;
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
num_queues = adapter->rss_queues;
/* Verify user input. */
for (i = 0; i < IGC_RETA_SIZE; i++)
- if (indir[i] >= num_queues)
+ if (rxfh->indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGC_RETA_SIZE; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
igc_write_rss_indir_tbl(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 17546a035ab1..0dd61719f1ed 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include "igc_hw.h"
@@ -578,9 +579,8 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
/* Calculate tw_system (nsec). */
if (speed == SPEED_100) {
- tw_system = ((rd32(IGC_EEE_SU) &
- IGC_TW_SYSTEM_100_MASK) >>
- IGC_TW_SYSTEM_100_SHIFT) * 500;
+ tw_system = FIELD_GET(IGC_TW_SYSTEM_100_MASK,
+ rd32(IGC_EEE_SU)) * 500;
} else {
tw_system = (rd32(IGC_EEE_SU) &
IGC_TW_SYSTEM_1000_MASK) * 500;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index e9bb403bbacf..81c21a893ede 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1299,14 +1299,16 @@ static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
/* insert L4 checksum */
- olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
- ((IGC_TXD_POPTS_TXSM << 8) /
- IGC_TX_FLAGS_CSUM);
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM,
+ (IGC_TXD_POPTS_TXSM << 8));
/* insert IPv4 checksum */
- olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
- (((IGC_TXD_POPTS_IXSM << 8)) /
- IGC_TX_FLAGS_IPV4);
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4,
+ (IGC_TXD_POPTS_IXSM << 8));
+
+ /* Use the second timer (free running, in general) for the timestamp */
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1,
+ IGC_TXD_PTP2_TIMER_1);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
@@ -1651,6 +1653,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
}
@@ -1963,9 +1967,9 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
@@ -1982,8 +1986,10 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
/* Determine available headroom for copy */
headlen = size;
@@ -2583,11 +2589,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
int xdp_status = 0, rx_buffer_pgcnt;
while (likely(total_packets < budget)) {
- union igc_adv_rx_desc *rx_desc;
+ struct igc_xdp_buff ctx = { .rx_ts = NULL };
struct igc_rx_buffer *rx_buffer;
+ union igc_adv_rx_desc *rx_desc;
unsigned int size, truesize;
- struct igc_xdp_buff ctx;
- ktime_t timestamp = 0;
int pkt_offset = 0;
void *pktbuf;
@@ -2614,9 +2619,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
- timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
- pktbuf);
- ctx.rx_ts = timestamp;
+ ctx.rx_ts = pktbuf;
pkt_offset = IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
}
@@ -2653,8 +2656,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
else if (ring_uses_build_skb(rx_ring))
skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
else
- skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
- timestamp);
+ skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -2803,9 +2805,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
ctx->rx_desc = desc;
if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
- timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
- bi->xdp->data);
- ctx->rx_ts = timestamp;
+ ctx->rx_ts = bi->xdp->data;
bi->xdp->data += IGC_TS_HDR_LEN;
@@ -3452,8 +3452,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
/* Configure filter */
queuing = input->length & IGC_FHFT_LENGTH_MASK;
- queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
- queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
+ queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue);
+ queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio);
if (input->immediate_irq)
queuing |= IGC_FHFT_IMM_INT;
@@ -3712,8 +3712,7 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
- int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
if (err)
@@ -3735,8 +3734,7 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter,
igc_del_etype_filter(adapter, rule->filter.etype);
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
- int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
igc_del_vlan_prio_filter(adapter, prio);
}
@@ -6489,7 +6487,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
- int i, drops;
+ int i, nxmit;
if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
@@ -6505,16 +6503,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
/* Avoid transmit queue timeout since we share it with the slow path */
txq_trans_cond_update(nq);
- drops = 0;
+ nxmit = 0;
for (i = 0; i < num_frames; i++) {
int err;
struct xdp_frame *xdpf = frames[i];
err = igc_xdp_init_tx_descriptor(ring, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err)
+ break;
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH)
@@ -6522,7 +6519,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_unlock(nq);
- return num_frames - drops;
+ return nxmit;
}
static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
@@ -6562,6 +6559,24 @@ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
return 0;
}
+static ktime_t igc_get_tstamp(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_inline_rx_tstamps *tstamp;
+ ktime_t timestamp;
+
+ tstamp = hwtstamps->netdev_data;
+
+ if (cycles)
+ timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1);
+ else
+ timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
+
+ return timestamp;
+}
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -6579,6 +6594,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
.ndo_xsk_wakeup = igc_xsk_wakeup,
+ .ndo_get_tstamp = igc_get_tstamp,
};
/* PCIe configuration access */
@@ -6682,9 +6698,11 @@ static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
const struct igc_xdp_buff *ctx = (void *)_ctx;
+ struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev);
+ struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts;
if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
- *timestamp = ctx->rx_ts;
+ *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
return 0;
}
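Besides the timestamp plumbing, the igc_main.c hunks above bring igc_xdp_xmit() in line with the current .ndo_xdp_xmit contract: on the first descriptor failure the loop stops and the partial count is returned, and the caller frees the unsent frames, so the driver no longer calls xdp_return_frame_rx_napi() itself. A condensed sketch of that contract (kernel context, example_queue_frame() is hypothetical):

static int example_xdp_xmit(struct net_device *dev, int num_frames,
			    struct xdp_frame **frames, u32 flags)
{
	int i, nxmit = 0;

	for (i = 0; i < num_frames; i++) {
		if (example_queue_frame(dev, frames[i]))
			break;		/* do not free here; the caller frees the rest */
		nxmit++;
	}
	return nxmit;
}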
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 53b77c969c85..861f37076861 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
+#include <linux/bitfield.h>
#include "igc_phy.h"
/**
@@ -129,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* Temporary workaround - should be removed when PHY will implement
- * IEEE registers as properly
- */
- /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
usleep_range(1000, 2000);
}
@@ -726,7 +723,7 @@ static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
*/
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
{
- u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ u8 dev_addr = FIELD_GET(GPY_MMD_MASK, offset);
s32 ret_val;
offset = offset & GPY_REG_MASK;
@@ -757,7 +754,7 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
*/
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
{
- u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ u8 dev_addr = FIELD_GET(GPY_MMD_MASK, offset);
s32 ret_val;
offset = offset & GPY_REG_MASK;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 928f38792203..885faaa7b9de 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -459,12 +459,10 @@ static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
/**
* igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
* @adapter: Pointer to adapter the packet buffer belongs to
- * @buf: Pointer to packet buffer
+ * @buf: Pointer to start of timestamp in HW format (2 32-bit words)
*
- * This function retrieves the timestamp saved in the beginning of packet
- * buffer. While two timestamps are available, one in timer0 reference and the
- * other in timer1 reference, this function considers only the timestamp in
- * timer0 reference.
+ * This function retrieves and converts the timestamp stored at @buf
+ * to ktime_t, adjusting for hardware latencies.
*
* Returns timestamp value.
*/
@@ -474,17 +472,8 @@ ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf)
u32 secs, nsecs;
int adjust;
- /* Timestamps are saved in little endian at the beginning of the packet
- * buffer following the layout:
- *
- * DWORD: | 0 | 1 | 2 | 3 |
- * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
- *
- * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
- * part of the timestamp.
- */
- nsecs = le32_to_cpu(buf[2]);
- secs = le32_to_cpu(buf[3]);
+ nsecs = le32_to_cpu(buf[0]);
+ secs = le32_to_cpu(buf[1]);
timestamp = ktime_set(secs, nsecs);
@@ -542,10 +531,11 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
val = rd32(IGC_SRRCTL(i));
- /* FIXME: For now, only support retrieving RX timestamps from
- * timer 0.
+ /* Enable retrieving timestamps from timer 0, the
+ * "adjustable clock", and timer 1, the "free running
+ * clock".
*/
- val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
+ val |= IGC_SRRCTL_TIMER1SEL(1) | IGC_SRRCTL_TIMER0SEL(0) |
IGC_SRRCTL_TIMESTAMP;
wr32(IGC_SRRCTL(i), val);
}
@@ -1035,6 +1025,26 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
adapter, &adapter->snapshot, cts);
}
+static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter, ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->free_timer_lock, flags);
+
+ ptp_read_system_prets(sts);
+ ts->tv_nsec = rd32(IGC_SYSTIML_1);
+ ts->tv_sec = rd32(IGC_SYSTIMH_1);
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&igc->free_timer_lock, flags);
+
+ return 0;
+}
+
/**
* igc_ptp_init - Initialize PTP functionality
* @adapter: Board private structure
@@ -1088,6 +1098,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225;
adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
+ adapter->ptp_caps.getcyclesx64 = igc_ptp_getcyclesx64;
adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
adapter->ptp_caps.pps = 1;
@@ -1108,6 +1119,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
}
spin_lock_init(&adapter->ptp_tx_lock);
+ spin_lock_init(&adapter->free_timer_lock);
spin_lock_init(&adapter->tmreg_lock);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
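The igc_ptp.c hunks above expose the free-running timer 1 through the new getcyclesx64 callback, alongside the adjustable timer 0. The low word is read before the high word on the assumption, carried over from the existing timer 0 path, that reading SYSTIML latches the matching SYSTIMH on this hardware family, and the dedicated free_timer_lock keeps the two reads atomic with respect to other readers. A condensed sketch of the read pattern (kernel context, illustrative):

spin_lock_irqsave(&free_timer_lock, flags);
ts->tv_nsec = rd32(IGC_SYSTIML_1);	/* low word first: latches the pair */
ts->tv_sec  = rd32(IGC_SYSTIMH_1);
spin_unlock_irqrestore(&free_timer_lock, flags);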
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 20e17f5fbce3..d38c87d7e5e8 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -243,6 +243,11 @@
#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define IGC_SYSTIML_1 0x0B688 /* System time register Low - RO (timer 1) */
+#define IGC_SYSTIMH_1 0x0B68C /* System time register High - RO (timer 1) */
+#define IGC_SYSTIMR_1 0x0B684 /* System time register Residue (timer 1) */
+#define IGC_TIMINCA_1 0x0B690 /* Increment attributes register - RW (timer 1) */
+
/* TX Timestamp Low */
#define IGC_TXSTMPL_0 0x0B618
#define IGC_TXSTMPL_1 0x0B698
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 100388968e4d..6835d5f18753 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -123,14 +123,14 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
&list_offset,
&data_offset);
if (ret_val)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
break;
default:
break;
@@ -213,7 +213,7 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
break;
default:
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
return 0;
@@ -283,7 +283,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
/* Validate the water mark configuration */
if (!hw->fc.pause_time)
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
/* Low water mark of zero causes XOFF floods */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -292,7 +292,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
if (!hw->fc.low_water[i] ||
hw->fc.low_water[i] >= hw->fc.high_water[i]) {
hw_dbg(hw, "Invalid water mark configuration\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
}
}
@@ -369,7 +369,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Set 802.3x based flow control settings. */
@@ -438,7 +438,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autonegotiation did not complete.\n");
}
}
@@ -478,7 +478,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
hw_dbg(hw, "Link was indicated but link is down\n");
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
return 0;
@@ -594,7 +594,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
@@ -701,9 +701,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
/* Init PHY and function pointers, perform SFP setup */
phy_status = hw->phy.ops.init(hw);
- if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (phy_status == -EOPNOTSUPP)
return phy_status;
- if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (phy_status == -ENOENT)
goto mac_reset_top;
hw->phy.ops.reset(hw);
@@ -727,7 +727,7 @@ mac_reset_top:
udelay(1);
}
if (ctrl & IXGBE_CTRL_RST) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -789,12 +789,12 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
rar_high &= ~IXGBE_RAH_VIND_MASK;
- rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+ rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq);
IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
return 0;
}
@@ -814,7 +814,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
@@ -845,7 +845,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
u32 vftabyte;
if (vlan > 4095)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Determine 32-bit word position in array */
regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
@@ -964,7 +964,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
gssr = IXGBE_GSSR_PHY0_SM;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
if (hw->phy.type == ixgbe_phy_nl) {
/*
@@ -993,7 +993,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
hw_dbg(hw, "EEPROM read did not pass.\n");
- status = IXGBE_ERR_SFP_NOT_PRESENT;
+ status = -ENOENT;
goto out;
}
@@ -1003,7 +1003,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
- status = IXGBE_ERR_PHY;
+ status = -EIO;
}
out:
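The ixgbe_82598.c hunks above are part of converting the driver-private IXGBE_ERR_* codes to standard errnos, so callers can test return values directly against <linux/errno.h> constants. A sketch of how a caller reads after the change (kernel context, illustrative):

ret = hw->phy.ops.init(hw);
if (ret == -EOPNOTSUPP)		/* was IXGBE_ERR_SFP_NOT_SUPPORTED */
	return ret;
if (ret == -ENOENT)		/* was IXGBE_ERR_SFP_NOT_PRESENT */
	goto mac_reset_top;	/* label as in ixgbe_reset_hw_82598() */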
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 58ea959a4482..339e106a5732 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -117,7 +117,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
goto setup_sfp_err;
@@ -144,7 +144,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
if (ret_val) {
hw_dbg(hw, " sfp module setup not complete\n");
- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ return -EIO;
}
}
@@ -159,7 +159,7 @@ setup_sfp_err:
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ return -EIO;
}
/**
@@ -184,7 +184,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
*locked = true;
}
@@ -219,7 +219,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
locked = true;
}
@@ -400,7 +400,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
break;
default:
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
if (hw->phy.multispeed_fiber) {
@@ -541,7 +541,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -794,7 +794,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
@@ -861,8 +861,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status =
- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -927,7 +926,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
/* Identify PHY and related function pointers */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Setup SFP module if there is one present. */
@@ -936,7 +935,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = false;
}
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Reset PHY */
@@ -974,7 +973,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -1093,7 +1092,7 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
udelay(10);
}
- return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+ return -EIO;
}
/**
@@ -1155,7 +1154,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
- return IXGBE_ERR_FDIR_REINIT_FAILED;
+ return -EIO;
}
/* Clear FDIR statistics registers (read to clear) */
@@ -1387,7 +1386,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on flow type input\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* configure FDIRCMD register */
@@ -1546,7 +1545,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on vm pool mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
@@ -1555,14 +1554,14 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
if (input_mask->formatted.dst_port ||
input_mask->formatted.src_port) {
hw_dbg(hw, " Error on src/dst port mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
break;
case IXGBE_ATR_L4TYPE_MASK:
break;
default:
hw_dbg(hw, " Error on flow type mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
@@ -1583,7 +1582,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on VLAN mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
@@ -1595,7 +1594,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on flexible byte mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
@@ -1824,7 +1823,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
/* Return error if SFP module has been detected but is not supported */
if (hw->phy.type == ixgbe_phy_sfp_unsupported)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
return status;
}
@@ -1863,13 +1862,13 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Verifies that installed the firmware version is 0.6 or higher
* for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
*
- * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
- * if the FW version is not supported.
+ * Return: -EACCES if the FW is not present or if the FW version is
+ * not supported.
**/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
+ s32 status = -EACCES;
u16 offset;
u16 fw_version = 0;
@@ -1883,7 +1882,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
goto fw_version_err;
if (fw_offset == 0 || fw_offset == 0xFFFF)
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
/* get the offset to the Pass Through Patch Configuration block */
offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
@@ -1891,7 +1890,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
goto fw_version_err;
if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
/* get the firmware version */
offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
@@ -1905,7 +1904,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
fw_version_err:
hw_err(hw, "eeprom read at offset %d failed\n", offset);
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
}
/**
@@ -2038,7 +2037,7 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
hw_dbg(hw, "auto negotiation not completed\n");
- ret_val = IXGBE_ERR_RESET_FAILED;
+ ret_val = -EIO;
goto reset_pipeline_out;
}
@@ -2087,7 +2086,7 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
if (!timeout) {
hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
goto release_i2c_access;
}
}
@@ -2141,7 +2140,7 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
if (!timeout) {
hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
goto release_i2c_access;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 878dd8dff528..2e6e0365154a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -124,7 +124,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
*/
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
/*
@@ -215,7 +215,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
if (hw->mac.type != ixgbe_mac_X540) {
@@ -500,7 +500,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
if (pba_num == NULL) {
hw_dbg(hw, "PBA string buffer was null\n");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
@@ -526,7 +526,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
/* we will need 11 characters to store the PBA */
if (pba_num_size < 11) {
hw_dbg(hw, "PBA string buffer too small\n");
- return IXGBE_ERR_NO_SPACE;
+ return -ENOSPC;
}
/* extract hex string from data and pba_ptr */
@@ -563,13 +563,13 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
if (length == 0xFFFF || length == 0) {
hw_dbg(hw, "NVM PBA number section invalid length\n");
- return IXGBE_ERR_PBA_SECTION;
+ return -EIO;
}
/* check if pba_num buffer is big enough */
if (pba_num_size < (((u32)length * 2) - 1)) {
hw_dbg(hw, "PBA string buffer too small\n");
- return IXGBE_ERR_NO_SPACE;
+ return -ENOSPC;
}
/* trim pba length from start of string */
@@ -684,7 +684,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
u32 reg;
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
- bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+ bus->func = FIELD_GET(IXGBE_STATUS_LAN_ID, reg);
bus->lan_id = bus->func;
/* check for a port swap */
@@ -695,8 +695,8 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
/* Get MAC instance from EEPROM for configuring CS4227 */
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
- bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
- IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ bus->instance_id = FIELD_GET(IXGBE_EE_CTRL_4_INST_ID,
+ ee_ctrl_4);
}
}
@@ -805,7 +805,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -826,7 +826,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn off the LED, set mode to OFF. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -870,10 +870,9 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* SPI EEPROM is assumed here. This code would need to
* change if a future EEPROM is not SPI.
*/
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -904,11 +903,8 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset + words > hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || (offset + words > hw->eeprom.word_size))
+ return -EINVAL;
/*
* The EEPROM page size cannot be queried from the chip. We do lazy
@@ -962,7 +958,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
if (ixgbe_ready_eeprom(hw) != 0) {
ixgbe_release_eeprom(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
for (i = 0; i < words; i++) {
@@ -1028,7 +1024,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ return -EINVAL;
return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
}
@@ -1050,11 +1046,8 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset + words > hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || (offset + words > hw->eeprom.word_size))
+ return -EINVAL;
/*
* We cannot hold synchronization semaphores for too long
@@ -1099,7 +1092,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
if (ixgbe_ready_eeprom(hw) != 0) {
ixgbe_release_eeprom(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
for (i = 0; i < words; i++) {
@@ -1142,7 +1135,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ return -EINVAL;
return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
}
@@ -1165,11 +1158,8 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || offset >= hw->eeprom.word_size)
+ return -EINVAL;
for (i = 0; i < words; i++) {
eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1262,11 +1252,8 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || offset >= hw->eeprom.word_size)
+ return -EINVAL;
for (i = 0; i < words; i++) {
eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1328,7 +1315,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
}
udelay(5);
}
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -1344,7 +1331,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
u32 i;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
@@ -1366,7 +1353,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
hw_dbg(hw, "Could not acquire EEPROM grant\n");
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* Setup EEPROM for Read/Write */
@@ -1419,7 +1406,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SMBI) {
hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
}
@@ -1447,7 +1434,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
if (i >= timeout) {
hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
return 0;
@@ -1503,7 +1490,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
*/
if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
hw_dbg(hw, "SPI EEPROM Status error\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
return 0;
@@ -1715,7 +1702,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
if (hw->eeprom.ops.read(hw, i, &pointer)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* If the pointer seems invalid */
@@ -1724,7 +1711,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
if (hw->eeprom.ops.read(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
if (length == 0xFFFF || length == 0)
@@ -1733,7 +1720,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
for (j = pointer + 1; j <= pointer + length; j++) {
if (hw->eeprom.ops.read(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -1786,7 +1773,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* calculated checksum
*/
if (read_checksum != checksum)
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
/* If the user cares, return the calculated checksum */
if (checksum_val)
@@ -1845,7 +1832,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
/* setup VMDq pool selection before this RAR gets enabled */
@@ -1897,7 +1884,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
/*
@@ -2146,7 +2133,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
/* Validate the water mark configuration. */
if (!hw->fc.pause_time)
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
/* Low water mark of zero causes XOFF floods */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -2155,7 +2142,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
if (!hw->fc.low_water[i] ||
hw->fc.low_water[i] >= hw->fc.high_water[i]) {
hw_dbg(hw, "Invalid water mark configuration\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
}
}
@@ -2212,7 +2199,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Set 802.3x based flow control settings. */
@@ -2269,7 +2256,7 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg)))
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EINVAL;
if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
/*
@@ -2321,7 +2308,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
(!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2353,12 +2340,12 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*/
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
if (hw->mac.type == ixgbe_mac_82599EB) {
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
}
/*
* Read the 10g AN autoc and LP ability registers and resolve
@@ -2407,8 +2394,8 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
**/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
ixgbe_link_speed speed;
+ s32 ret_val = -EIO;
bool link_up;
/*
@@ -2510,7 +2497,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Disables PCI-Express primary access and verifies there are no pending
- * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
+ * requests. -EALREADY is returned if primary disable
* bit hasn't caused the primary requests to be disabled, else 0
* is returned signifying primary requests disabled.
**/
@@ -2575,7 +2562,7 @@ gio_disable_fail:
}
hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
+ return -EALREADY;
}
/**
@@ -2600,7 +2587,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_eeprom_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
if (!(gssr & (fwmask | swmask))) {
@@ -2620,7 +2607,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
usleep_range(5000, 10000);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
/**
@@ -2757,7 +2744,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
s32 ret_val;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/*
* Link must be up to auto-blink the LEDs;
@@ -2803,7 +2790,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
s32 ret_val;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val)
@@ -2963,7 +2950,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
@@ -3014,7 +3001,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
if (vmdq < 32) {
@@ -3091,7 +3078,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
* will simply bypass the VLVF if there are no entries present in the
* VLVF that contain our VLAN
*/
- first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
+ first_empty_slot = vlvf_bypass ? -ENOSPC : 0;
/* add VLAN enable bit for comparison */
vlan |= IXGBE_VLVF_VIEN;
@@ -3115,7 +3102,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
if (!first_empty_slot)
hw_dbg(hw, "No space in VLVF.\n");
- return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
+ return first_empty_slot ? : -ENOSPC;
}
/**
@@ -3135,7 +3122,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
s32 vlvf_index;
if ((vlan > 4095) || (vind > 63))
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/*
* this is a 2 part operation - first the VFTA, then the
@@ -3611,7 +3598,8 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
*
* Communicates with the manageability block. On success return 0
* else returns semaphore error when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * semaphore, -EINVAL when incorrect parameters passed or -EIO when
+ * command fails.
*
* This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
* by the caller.
@@ -3624,7 +3612,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EINVAL;
}
/* Set bit 9 of FWSTS clearing FW reset indication */
@@ -3635,13 +3623,13 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
dword_len = length >> 2;
@@ -3666,7 +3654,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
/* Check command successful completion. */
if ((timeout && i == timeout) ||
!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
return 0;
}
@@ -3686,7 +3674,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
* in these cases.
*
* Communicates with the manageability block. On success return 0
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ * else return -EIO or -EINVAL.
**/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
@@ -3701,7 +3689,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EINVAL;
}
/* Take management host interface semaphore */
status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
@@ -3731,7 +3719,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
if (length < round_up(buf_len, 4) + hdr_size) {
hw_dbg(hw, "Buffer not large enough for reply message.\n");
- status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = -EIO;
goto rel_out;
}
@@ -3762,8 +3750,8 @@ rel_out:
*
* Sends driver version number to firmware through the manageability
* block. On success return 0
- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * else returns -EBUSY when encountering an error acquiring
+ * semaphore or -EIO when command fails.
**/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, __always_unused u16 len,
@@ -3799,7 +3787,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
FW_CEM_RESP_STATUS_SUCCESS)
ret_val = 0;
else
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ ret_val = -EIO;
break;
}
@@ -3897,14 +3885,14 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
return status;
if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
if (status)
return status;
if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
return 0;
}
@@ -3927,7 +3915,7 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
/* Only support thermal sensors attached to physical port 0 */
if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
if (status)
@@ -3946,10 +3934,10 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
if (status)
return status;
- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
- IXGBE_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
- IXGBE_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
+ ets_sensor);
+ sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
+ ets_sensor);
if (sensor_location != 0) {
status = hw->phy.ops.read_i2c_byte(hw,
@@ -3987,14 +3975,13 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
/* Only support thermal sensors attached to physical port 0 */
if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
if (status)
return status;
- low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
- IXGBE_ETS_LTHRES_DELTA_SHIFT);
+ low_thresh_delta = FIELD_GET(IXGBE_ETS_LTHRES_DELTA_MASK, ets_cfg);
num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
if (num_sensors > IXGBE_MAX_SENSORS)
num_sensors = IXGBE_MAX_SENSORS;
@@ -4008,10 +3995,10 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
ets_offset + 1 + i);
continue;
}
- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
- IXGBE_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
- IXGBE_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
+ ets_sensor);
+ sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
+ ets_sensor);
therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
hw->phy.ops.write_i2c_byte(hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4dd897806fa5..9a63457712c7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1413,12 +1413,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < IXGBE_TEST_LEN; i++)
- ethtool_sprintf(&p, "%s", ixgbe_gstrings_test[i]);
+ ethtool_puts(&p, ixgbe_gstrings_test[i]);
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ixgbe_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, ixgbe_gstrings_stats[i].stat_string);
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -3108,35 +3107,37 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
-static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ixgbe_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- ixgbe_get_reta(adapter, indir);
+ if (rxfh->indir)
+ ixgbe_get_reta(adapter, rxfh->indir);
- if (key)
- memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key,
+ ixgbe_get_rxfh_key_size(netdev));
return 0;
}
-static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ixgbe_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
/* Fill out the redirection table */
- if (indir) {
+ if (rxfh->indir) {
int max_queues = min_t(int, adapter->num_rx_queues,
ixgbe_rss_indir_tbl_max(adapter));
@@ -3147,18 +3148,19 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
/* Verify user input. */
for (i = 0; i < reta_entries; i++)
- if (indir[i] >= max_queues)
+ if (rxfh->indir[i] >= max_queues)
return -EINVAL;
for (i = 0; i < reta_entries; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
ixgbe_store_reta(adapter);
}
/* Fill out the rss hash key */
- if (key) {
- memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
+ if (rxfh->key) {
+ memcpy(adapter->rss_key, rxfh->key,
+ ixgbe_get_rxfh_key_size(netdev));
ixgbe_store_key(adapter);
}
@@ -3370,7 +3372,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ s32 status = -EFAULT;
u8 databyte = 0xFF;
int i = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 7311bd545acf..18d63c8c2ff4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -670,8 +670,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
fcoe->indices);
fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
- fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
- IXGBE_FCRETA_ENTRY_HIGH_MASK;
+ fcoe_q_h = FIELD_PREP(IXGBE_FCRETA_ENTRY_HIGH_MASK,
+ fcoe_q_h);
}
fcoe_i = fcoe->offset + (i % fcoe->indices);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 94bde2cad0f4..99876b765b08 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2756,7 +2756,6 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
- s32 rc;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
@@ -2790,14 +2789,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
}
/* Check if this is not due to overtemp */
- if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
+ if (!hw->phy.ops.check_overtemp(hw))
return;
break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
- rc = hw->phy.ops.check_overtemp(hw);
- if (rc != IXGBE_ERR_OVERTEMP)
+ if (!hw->phy.ops.check_overtemp(hw))
return;
break;
default:
@@ -2941,8 +2939,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -5512,7 +5510,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
u32 speed;
bool autoneg, link_up = false;
- int ret = IXGBE_ERR_LINK_SETUP;
+ int ret = -EIO;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5983,13 +5981,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
err = hw->mac.ops.init_hw(hw);
switch (err) {
case 0:
- case IXGBE_ERR_SFP_NOT_PRESENT:
- case IXGBE_ERR_SFP_NOT_SUPPORTED:
+ case -ENOENT:
+ case -EOPNOTSUPP:
break;
- case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
+ case -EALREADY:
e_dev_err("primary disable timed out\n");
break;
- case IXGBE_ERR_EEPROM_VERSION:
+ case -EACCES:
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
"Please be aware there may be issues associated with "
@@ -7829,10 +7827,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
err = hw->phy.ops.identify_sfp(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (err == -EOPNOTSUPP)
goto sfp_out;
- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ if (err == -ENOENT) {
/* If no cable is present, then we need to reset
* the next time we find a good cable. */
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
@@ -7858,7 +7856,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
else
err = hw->mac.ops.setup_sfp(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (err == -EOPNOTSUPP)
goto sfp_out;
adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
@@ -7867,8 +7865,8 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
sfp_out:
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
- if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
- (adapter->netdev->reg_state == NETREG_REGISTERED)) {
+ if (err == -EOPNOTSUPP &&
+ adapter->netdev->reg_state == NETREG_REGISTERED) {
e_dev_err("failed to initialize because an unsupported "
"SFP+ module type was detected.\n");
e_dev_err("Reload the driver after installing a "
@@ -7938,7 +7936,7 @@ static void ixgbe_service_timer(struct timer_list *t)
static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 status;
+ bool overtemp;
if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
return;
@@ -7948,11 +7946,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
if (!hw->phy.ops.handle_lasi)
return;
- status = hw->phy.ops.handle_lasi(&adapter->hw);
- if (status != IXGBE_ERR_OVERTEMP)
- return;
-
- e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ hw->phy.ops.handle_lasi(&adapter->hw, &overtemp);
+ if (overtemp)
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
}
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@@ -10529,6 +10525,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
}
/**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 qmask = BIT_ULL(ring);
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ synchronize_irq(adapter->msix_entries[ring].vector);
+ else
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
* @adapter: adapter structure
* @ring: ring index
@@ -10544,6 +10578,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
+ ixgbe_irq_disable_single(adapter, ring);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
ixgbe_disable_txr(adapter, tx_ring);
if (xdp_ring)
ixgbe_disable_txr(adapter, xdp_ring);
@@ -10552,9 +10591,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
if (xdp_ring)
synchronize_rcu();
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_disable(&rx_ring->q_vector->napi);
-
ixgbe_clean_tx_ring(tx_ring);
if (xdp_ring)
ixgbe_clean_tx_ring(xdp_ring);
@@ -10582,9 +10618,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_enable(&rx_ring->q_vector->napi);
-
ixgbe_configure_tx_ring(adapter, tx_ring);
if (xdp_ring)
ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10593,6 +10626,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
if (xdp_ring)
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
@@ -10922,9 +10960,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = hw->mac.ops.reset_hw(hw);
hw->phy.reset_if_overtemp = false;
ixgbe_set_eee_capable(adapter);
- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ if (err == -ENOENT) {
err = 0;
- } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ } else if (err == -EOPNOTSUPP) {
e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
e_dev_err("Reload the driver after installing a supported module.\n");
goto err_sw_init;
@@ -11143,7 +11181,7 @@ skip_sriov:
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
- if (err == IXGBE_ERR_EEPROM_VERSION) {
+ if (err == -EACCES) {
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
"Please be aware there may be issues associated "
@@ -11371,7 +11409,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if ((pf_func & 1) == (pdev->devfn & 1)) {
unsigned int device_id;
- vf = (req_id & 0x7F) >> 1;
+ vf = FIELD_GET(0x7F, req_id);
e_dev_err("VF %d has caused a PCIe error\n", vf);
e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
"%8.8x\tdw3: %8.8x\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 5679293e53f7..fe7ef5773369 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -24,7 +24,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
size = mbx->size;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->read(hw, msg, size, mbx_id);
}
@@ -43,10 +43,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (size > mbx->size)
- return IXGBE_ERR_MBX;
+ return -EINVAL;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->write(hw, msg, size, mbx_id);
}
@@ -63,7 +63,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_msg(hw, mbx_id);
}
@@ -80,7 +80,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_ack(hw, mbx_id);
}
@@ -97,7 +97,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_rst(hw, mbx_id);
}
@@ -115,12 +115,12 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
int countdown = mbx->timeout;
if (!countdown || !mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
while (mbx->ops->check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
- return IXGBE_ERR_MBX;
+ return -EIO;
udelay(mbx->usec_delay);
}
@@ -140,12 +140,12 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
int countdown = mbx->timeout;
if (!countdown || !mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
while (mbx->ops->check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
- return IXGBE_ERR_MBX;
+ return -EIO;
udelay(mbx->usec_delay);
}
@@ -169,7 +169,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
s32 ret_val;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
if (ret_val)
@@ -197,7 +197,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops || !mbx->timeout)
- return IXGBE_ERR_MBX;
+ return -EIO;
/* send msg */
ret_val = mbx->ops->write(hw, msg, size, mbx_id);
@@ -217,7 +217,7 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -238,7 +238,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -259,7 +259,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -295,7 +295,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -317,7 +317,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
return 0;
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 8f4316b19278..6434c190e7a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -7,7 +7,6 @@
#include "ixgbe_type.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 689470c1e8ad..f28140a05f09 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -102,7 +102,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
csum = ~csum;
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -150,7 +150,7 @@ fail:
hw_dbg(hw, "I2C byte read combined error.\n");
} while (retry < max_retry);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
/**
@@ -179,7 +179,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
csum = ~csum;
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -215,7 +215,7 @@ fail:
hw_dbg(hw, "I2C byte write combined error.\n");
} while (retry < max_retry);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
/**
@@ -262,8 +262,8 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
**/
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
+ u32 status = -EFAULT;
u32 phy_addr;
- u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
if (!hw->phy.phy_semaphore_mask) {
if (hw->bus.lan_id)
@@ -276,13 +276,12 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
return 0;
if (hw->phy.nw_mng_if_sel) {
- phy_addr = (hw->phy.nw_mng_if_sel &
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ phy_addr = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
+ hw->phy.nw_mng_if_sel);
if (ixgbe_probe_phy(hw, phy_addr))
return 0;
else
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
}
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -408,8 +407,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
return status;
/* Don't reset PHY if it's shut down due to overtemp. */
- if (!hw->phy.reset_if_overtemp &&
- (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw))
return 0;
/* Blocked by MNG FW so bail */
@@ -457,7 +455,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
if (ctrl & MDIO_CTRL1_RESET) {
hw_dbg(hw, "PHY reset polling failed to complete.\n");
- return IXGBE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -500,7 +498,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address command did not complete.\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Address cycle complete, setup and write the read
@@ -527,7 +525,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY read command didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Read operation is complete. Get the data
@@ -559,7 +557,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
} else {
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
return status;
@@ -604,7 +602,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address cmd didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/*
@@ -632,7 +630,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY write cmd didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -657,7 +655,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
} else {
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
return status;
@@ -1430,7 +1428,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if ((phy_data & MDIO_CTRL1_RESET) != 0) {
hw_dbg(hw, "PHY reset did not complete.\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Get init offsets */
@@ -1448,8 +1446,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
if (ret_val)
goto err_eeprom;
- control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ control = FIELD_GET(IXGBE_CONTROL_MASK_NL, eword);
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
@@ -1487,12 +1484,12 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
hw_dbg(hw, "SOL\n");
} else {
hw_dbg(hw, "Bad control value\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
break;
default:
hw_dbg(hw, "Bad control type\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
}
@@ -1500,7 +1497,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
err_eeprom:
hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/**
@@ -1518,10 +1515,10 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
return ixgbe_identify_qsfp_module_generic(hw);
default:
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1546,7 +1543,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/* LAN ID is needed for sfp_type determination */
@@ -1561,7 +1558,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_1GBE_COMP_CODES,
@@ -1752,7 +1749,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
/* Anything else 82598-based is supported */
@@ -1776,7 +1773,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
hw_dbg(hw, "SFP+ module not supported\n");
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
@@ -1786,7 +1783,7 @@ err_read_i2c_eeprom:
hw->phy.id = 0;
hw->phy.type = ixgbe_phy_unknown;
}
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1813,7 +1810,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/* LAN ID is needed for sfp_type determination */
@@ -1827,7 +1824,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
hw->phy.id = identifier;
@@ -1895,7 +1892,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
} else {
/* unsupported module type */
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
}
@@ -1955,7 +1952,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
}
hw_dbg(hw, "QSFP module not supported\n");
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
}
@@ -1966,7 +1963,7 @@ err_read_i2c_eeprom:
hw->phy.id = 0;
hw->phy.type = ixgbe_phy_unknown;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1986,14 +1983,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 sfp_type = hw->phy.sfp_type;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
(hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/*
* Limiting active cables and 1G Phys must be initialized as
@@ -2014,11 +2011,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
hw_err(hw, "eeprom read at %d failed\n",
IXGBE_PHY_INIT_OFFSET_NL);
- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ return -EIO;
}
if ((!*list_offset) || (*list_offset == 0xFFFF))
- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ return -EIO;
/* Shift offset to first ID word */
(*list_offset)++;
@@ -2037,7 +2034,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
goto err_phy;
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
hw_dbg(hw, "SFP+ module not supported\n");
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
} else {
break;
}
@@ -2050,14 +2047,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_id == IXGBE_PHY_INIT_END_NL) {
hw_dbg(hw, "No matching SFP+ module found\n");
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
err_phy:
hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/**
@@ -2152,7 +2149,7 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
@@ -2268,7 +2265,7 @@ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u32 swfw_mask = hw->phy.phy_semaphore_mask;
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
do {
ixgbe_i2c_start(hw);
@@ -2510,7 +2507,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
if (ack == 1) {
hw_dbg(hw, "I2C ack was not received.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
}
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -2582,7 +2579,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
udelay(IXGBE_I2C_T_LOW);
} else {
hw_dbg(hw, "I2C data was not set to %X\n", data);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
return 0;
@@ -2678,7 +2675,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
return 0;
@@ -2748,22 +2745,24 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Checks if the LASI temp alarm status was triggered due to overtemp
+ *
+ * Return true when an overtemp event detected, otherwise false.
**/
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
{
u16 phy_data = 0;
+ u32 status;
if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
- return 0;
+ return false;
/* Check that the LASI temp alarm status was triggered */
- hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
- MDIO_MMD_PMAPMD, &phy_data);
-
- if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
- return 0;
+ status = hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ MDIO_MMD_PMAPMD, &phy_data);
+ if (status)
+ return false;
- return IXGBE_ERR_OVERTEMP;
+ return !!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM);
}
/** ixgbe_set_copper_phy_power - Control power for copper phy
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 6544c4539c0d..ef72729d7c93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -155,7 +155,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 9cfdfa8a4355..7299a830f6e4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -363,8 +363,7 @@ int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
+ int entries = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
@@ -969,7 +968,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+ u32 add = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
u8 tcs = adapter->hw_tcs;
@@ -992,8 +991,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
- int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
- IXGBE_VT_MSGINFO_SHIFT;
+ int index = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
int err;
if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
@@ -1327,7 +1325,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
- retval = IXGBE_ERR_MBX;
+ retval = -EIO;
break;
}
@@ -1849,5 +1847,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
ivi->trusted = adapter->vfinfo[vf].trusted;
+ ivi->linkstate = adapter->vfinfo[vf].link_state;
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2b00db92b08f..61b9774b3d31 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3509,10 +3509,10 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
- s32 (*check_overtemp)(struct ixgbe_hw *);
+ bool (*check_overtemp)(struct ixgbe_hw *);
s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
s32 (*enter_lplu)(struct ixgbe_hw *);
- s32 (*handle_lasi)(struct ixgbe_hw *hw);
+ s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *);
s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 *value);
s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
@@ -3665,45 +3665,6 @@ struct ixgbe_info {
const u32 *mvals;
};
-
-/* Error Codes */
-#define IXGBE_ERR_EEPROM -1
-#define IXGBE_ERR_EEPROM_CHECKSUM -2
-#define IXGBE_ERR_PHY -3
-#define IXGBE_ERR_CONFIG -4
-#define IXGBE_ERR_PARAM -5
-#define IXGBE_ERR_MAC_TYPE -6
-#define IXGBE_ERR_UNKNOWN_PHY -7
-#define IXGBE_ERR_LINK_SETUP -8
-#define IXGBE_ERR_ADAPTER_STOPPED -9
-#define IXGBE_ERR_INVALID_MAC_ADDR -10
-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
-#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
-#define IXGBE_ERR_RESET_FAILED -15
-#define IXGBE_ERR_SWFW_SYNC -16
-#define IXGBE_ERR_PHY_ADDR_INVALID -17
-#define IXGBE_ERR_I2C -18
-#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
-#define IXGBE_ERR_SFP_NOT_PRESENT -20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
-#define IXGBE_ERR_FDIR_REINIT_FAILED -23
-#define IXGBE_ERR_EEPROM_VERSION -24
-#define IXGBE_ERR_NO_SPACE -25
-#define IXGBE_ERR_OVERTEMP -26
-#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
-#define IXGBE_ERR_FC_NOT_SUPPORTED -28
-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
-#define IXGBE_ERR_PBA_SECTION -31
-#define IXGBE_ERR_INVALID_ARGUMENT -32
-#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
-#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
-#define IXGBE_ERR_FW_RESP_INVALID -39
-#define IXGBE_ERR_TOKEN_RETRY -40
-#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
#define IXGBE_FUSES0_REV_MASK (3u << 6)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index d5cfb51ff648..57a912e4653f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -84,7 +84,7 @@ mac_reset_top:
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status) {
hw_dbg(hw, "semaphore failed with %d", status);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ctrl = IXGBE_CTRL_RST;
@@ -103,7 +103,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
msleep(100);
@@ -187,16 +187,16 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ u16 eeprom_size;
+ u32 eec;
+
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
IXGBE_EEPROM_WORD_SIZE_SHIFT);
@@ -220,7 +220,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_read_eerd_generic(hw, offset, data);
@@ -243,7 +243,7 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
@@ -264,7 +264,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_eewr_generic(hw, offset, data);
@@ -287,7 +287,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data);
@@ -324,7 +324,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
for (i = 0; i < checksum_last_word; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -349,7 +349,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* Skip pointer section if length is invalid. */
@@ -360,7 +360,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
for (j = pointer + 1; j <= pointer + length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -397,7 +397,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
@@ -418,7 +418,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
*/
if (read_checksum != checksum) {
hw_dbg(hw, "Invalid EEPROM checksum");
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
}
/* If the user cares, return the calculated checksum */
@@ -455,7 +455,7 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
@@ -490,7 +490,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
s32 status;
status = ixgbe_poll_flash_update_done_X540(hw);
- if (status == IXGBE_ERR_EEPROM) {
+ if (status == -EIO) {
hw_dbg(hw, "Flash update time out\n");
return status;
}
@@ -540,7 +540,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
return 0;
udelay(5);
}
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -575,7 +575,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
@@ -599,7 +599,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* bits in the SW_FW_SYNC register.
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (swfw_sync & (fwmask | hwmask)) {
swfw_sync |= swmask;
@@ -622,11 +622,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
rmask |= IXGBE_GSSR_I2C_MASK;
ixgbe_release_swfw_sync_X540(hw, rmask);
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
/**
@@ -680,7 +680,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
if (i == timeout) {
hw_dbg(hw,
"Software semaphore SMBI between device drivers not granted.\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* Now get the semaphore between SW/FW through the REGSMP bit */
@@ -697,7 +697,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
*/
hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -768,7 +768,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
bool link_up;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
@@ -804,7 +804,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
u32 ledctl_reg;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Restore the LED to its default value. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index aa4bf6c9a2f7..c1adc94a5a65 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -206,13 +206,13 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
}
if (retry == IXGBE_CS4227_RETRIES) {
hw_err(hw, "CS4227 reset did not complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
hw_err(hw, "CS4227 EEPROM did not load successfully\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -350,13 +350,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
}
static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
}
/**
@@ -463,7 +463,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
--retries;
} while (retries > 0);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
}
static const struct {
@@ -511,7 +511,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
hw->phy.autoneg_advertised = hw->phy.speeds_supported;
hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
@@ -568,7 +568,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
switch (hw->fc.requested_mode) {
@@ -600,8 +600,10 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
if (rc)
return rc;
+
if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
- return IXGBE_ERR_OVERTEMP;
+ return -EIO;
+
return 0;
}
@@ -628,16 +630,16 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ u16 eeprom_size;
+ u32 eec;
+
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
IXGBE_EEPROM_WORD_SIZE_SHIFT);
@@ -675,7 +677,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
*ctrl = command;
if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
hw_dbg(hw, "IOSF wait timed out\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -712,10 +714,10 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
hw_dbg(hw, "Failed to read, error %x\n", error);
- return IXGBE_ERR_PHY;
+ ret = -EIO;
+ goto out;
}
if (!ret)
@@ -750,9 +752,9 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return 0;
if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
- return IXGBE_ERR_FW_RESP_INVALID;
+ return -EIO;
- return IXGBE_ERR_TOKEN_RETRY;
+ return -EAGAIN;
}
/**
@@ -778,7 +780,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
return status;
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return 0;
- return IXGBE_ERR_FW_RESP_INVALID;
+ return -EIO;
}
/**
@@ -942,7 +944,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
local_buffer = buf;
} else {
if (buffer_size < ptr)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
local_buffer = &buffer[ptr];
}
@@ -960,7 +962,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
}
if (buffer && ((u32)start + (u32)length > buffer_size))
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
for (i = start; length; i++, length--) {
if (i == bufsz && !buffer) {
@@ -1012,7 +1014,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
local_buffer = eeprom_ptrs;
} else {
if (buffer_size < IXGBE_EEPROM_LAST_WORD)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
local_buffer = buffer;
}
@@ -1148,7 +1150,7 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
* calculated checksum
*/
if (read_checksum != checksum) {
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
hw_dbg(hw, "Invalid EEPROM checksum");
}
@@ -1203,7 +1205,7 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
} else {
hw_dbg(hw, "write ee hostif failed to get semaphore");
- status = IXGBE_ERR_SWFW_SYNC;
+ status = -EBUSY;
}
return status;
@@ -1412,10 +1414,9 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
hw_dbg(hw, "Failed to write, error %x\n", error);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
out:
@@ -1558,7 +1559,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
/* iXFI is only supported with X552 */
if (mac->type != ixgbe_mac_X550EM_x)
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
/* Disable AN and force speed to 10G Serial. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
@@ -1580,7 +1581,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
break;
default:
/* Other link speeds are not supported by internal KR PHY. */
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
}
status = ixgbe_write_iosf_sb_reg_x550(hw,
@@ -1611,7 +1612,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
{
switch (hw->phy.sfp_type) {
case ixgbe_sfp_type_not_present:
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
*linear = true;
@@ -1630,7 +1631,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
default:
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
@@ -1660,7 +1661,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
* there is no reason to configure CS4227 and SFP not present error is
* not accepted in the setup MAC link flow.
*/
- if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status == -ENOENT)
return 0;
if (status)
@@ -1718,7 +1719,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
break;
default:
/* Other link speeds are not supported by internal PHY. */
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
}
(void)mac->ops.write_iosf_sb_reg(hw,
@@ -1803,7 +1804,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* If no SFP module present, then return success. Return success since
* SFP not present error is not accepted in the setup MAC link flow.
*/
- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (ret_val == -ENOENT)
return 0;
if (ret_val)
@@ -1853,7 +1854,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* If no SFP module present, then return success. Return success since
* SFP not present error is not accepted in the setup MAC link flow.
*/
- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (ret_val == -ENOENT)
return 0;
if (ret_val)
@@ -1863,7 +1864,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ixgbe_setup_kr_speed_x550em(hw, speed);
if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
/* Get external PHY SKU id */
ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
@@ -1962,7 +1963,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
u16 i, autoneg_status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
- return IXGBE_ERR_CONFIG;
+ return -EIO;
status = ixgbe_check_mac_link_generic(hw, speed, link_up,
link_up_wait_to_complete);
@@ -2145,9 +2146,9 @@ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*/
static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
+ s32 status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -2165,7 +2166,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
/* Check if auto-negotiation has completed */
status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
- status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ status = -EIO;
goto out;
}
@@ -2369,18 +2370,18 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* @hw: pointer to hardware structure
* @lsc: pointer to boolean flag which indicates whether external Base T
* PHY interrupt is lsc
+ * @is_overtemp: indicates whether an overtemp event was encountered
*
* Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
- *
- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
- * failure alarm, else return PHY access status.
**/
-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
+ bool *is_overtemp)
{
u32 status;
u16 reg;
+ *is_overtemp = false;
*lsc = false;
/* Vendor alarm triggered */
@@ -2412,7 +2413,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
/* power down the PHY in case the PHY FW didn't already */
ixgbe_set_copper_phy_power(hw, false);
- return IXGBE_ERR_OVERTEMP;
+ *is_overtemp = true;
+ return -EIO;
}
if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
/* device fault alarm triggered */
@@ -2426,7 +2428,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
/* power down the PHY in case the PHY FW didn't */
ixgbe_set_copper_phy_power(hw, false);
- return IXGBE_ERR_OVERTEMP;
+ *is_overtemp = true;
+ return -EIO;
}
}
@@ -2462,12 +2465,12 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
**/
static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
+ bool lsc, overtemp;
u32 status;
u16 reg;
- bool lsc;
/* Clear interrupt flags */
- status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, &overtemp);
/* Enable link status change alarm */
@@ -2546,21 +2549,20 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
* @hw: pointer to hardware structure
+ * @is_overtemp: indicates whether an overtemp event was encountered
*
* Handle external Base T PHY interrupt. If high temperature
* failure alarm then return error, else if link status change
* then setup internal/external PHY link
- *
- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
- * failure alarm, else return PHY access status.
**/
-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
+ bool *is_overtemp)
{
struct ixgbe_phy_info *phy = &hw->phy;
bool lsc;
u32 status;
- status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, is_overtemp);
if (status)
return status;
@@ -2692,7 +2694,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
u16 speed;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
- return IXGBE_ERR_CONFIG;
+ return -EIO;
if (!(hw->mac.type == ixgbe_mac_X550EM_x &&
!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) {
@@ -2735,7 +2737,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
break;
default:
/* Internal PHY does not support anything else */
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
return ixgbe_setup_ixfi_x550em(hw, &force_speed);
@@ -2767,7 +2769,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
u16 phy_data;
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
@@ -2789,7 +2791,7 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
u16 phy_data;
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
@@ -2813,8 +2815,9 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
*
* Sends driver version number to firmware through the manageability
* block. On success return 0
- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * else returns -EBUSY when encountering an error acquiring
+ * semaphore, -EIO when the command fails, or -EINVAL when incorrect
+ * params are passed.
**/
static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, u16 len,
@@ -2825,7 +2828,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
int i;
if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
@@ -2850,7 +2853,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
if (fw_cmd.hdr.cmd_or_resp.ret_status !=
FW_CEM_RESP_STATUS_SUCCESS)
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
return 0;
}
@@ -2907,7 +2910,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
/* 10gig parts do not have a word in the EEPROM to determine the
@@ -2942,7 +2945,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
break;
default:
hw_err(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (hw->device_id) {
@@ -2986,8 +2989,8 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
{
u32 link_s1, lp_an_page_low, an_cntl_1;
- s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
ixgbe_link_speed speed;
+ s32 status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -3013,7 +3016,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
hw_dbg(hw, "Auto-Negotiation did not complete\n");
- status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ status = -EIO;
goto out;
}
@@ -3187,21 +3190,23 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
/**
* ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
* @hw: pointer to hardware structure
+ *
+ * Return true when an overtemp event is detected, otherwise false.
*/
-static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
- return rc;
+ return false;
if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
ixgbe_shutdown_fw_phy(hw);
- return IXGBE_ERR_OVERTEMP;
+ return true;
}
- return 0;
+ return false;
}
/**
@@ -3222,9 +3227,8 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
*/
if (hw->mac.type == ixgbe_mac_x550em_a &&
hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
- hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ hw->phy.mdio.prtad = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
+ hw->phy.nw_mng_if_sel);
}
}
@@ -3251,8 +3255,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
- ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
+ if (ret_val == -EOPNOTSUPP || ret_val == -EFAULT)
return ret_val;
/* Setup function pointers based on detected hardware */
@@ -3460,8 +3463,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
- status == IXGBE_ERR_PHY_ADDR_INVALID)
+ if (status == -EOPNOTSUPP || status == -EFAULT)
return status;
/* start the external PHY */
@@ -3477,7 +3479,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = false;
}
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Reset PHY */
@@ -3501,7 +3503,7 @@ mac_reset_top:
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status) {
hw_dbg(hw, "semaphore failed with %d", status);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
@@ -3519,7 +3521,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -3615,7 +3617,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
if (hw->fc.requested_mode == ixgbe_fc_default)
@@ -3672,7 +3674,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
break;
default:
hw_err(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
status = hw->mac.ops.write_iosf_sb_reg(hw,
@@ -3768,7 +3770,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
return 0;
if (hmask)
ixgbe_release_swfw_sync_X540(hw, hmask);
- if (status != IXGBE_ERR_TOKEN_RETRY)
+ if (status != -EAGAIN)
return status;
msleep(FW_PHY_TOKEN_DELAY);
}
@@ -3812,7 +3814,7 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
@@ -3838,7 +3840,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
hw->mac.ops.release_swfw_sync(hw, mask);
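
Several hunks in this file also convert open-coded mask-and-shift extraction to FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask at compile time. A small standalone sketch with a made-up register layout (DEMO_ERR_MASK and demo_get_err are illustration-only names, not part of this patch):

	#include <linux/bits.h>
	#include <linux/bitfield.h>

	/* Hypothetical 32-bit register: bits 9..7 hold an error code */
	#define DEMO_ERR_MASK	GENMASK(9, 7)

	static u32 demo_get_err(u32 reg)
	{
		/* Equivalent to (reg & DEMO_ERR_MASK) >> 7, but the shift
		 * is taken from the mask, so mask and shift cannot drift
		 * apart when the register layout changes.
		 */
		return FIELD_GET(DEMO_ERR_MASK, reg);
	}
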
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 296915414a7c..7ac53171b041 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -897,40 +897,41 @@ static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
return IXGBEVF_RSS_HASH_KEY_SIZE;
}
-static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ixgbevf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
int err = 0;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
- if (key)
- memcpy(key, adapter->rss_key,
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key,
ixgbevf_get_rxfh_key_size(netdev));
- if (indir) {
+ if (rxfh->indir) {
int i;
for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
}
} else {
/* If neither indirection table nor hash key was requested
* - just return a success avoiding taking any locks.
*/
- if (!indir && !key)
+ if (!rxfh->indir && !rxfh->key)
return 0;
spin_lock_bh(&adapter->mbx_lock);
- if (indir)
- err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+ if (rxfh->indir)
+ err = ixgbevf_get_reta_locked(&adapter->hw,
+ rxfh->indir,
adapter->num_rx_queues);
- if (!err && key)
- err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+ if (!err && rxfh->key)
+ err = ixgbevf_get_rss_key_locked(&adapter->hw,
+ rxfh->key);
spin_unlock_bh(&adapter->mbx_lock);
}
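
This ixgbevf change (and the mvneta/mvpp2 hunks further down) moves the driver to the consolidated ethtool RXFH API: ->get_rxfh()/->set_rxfh() now receive a single struct ethtool_rxfh_param instead of separate indir/key/hfunc arguments. A minimal sketch of a new-style get_rxfh for a hypothetical driver; demo_priv, DEMO_RETA_SIZE and demo_get_rxfh are assumptions made for illustration:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	#define DEMO_RETA_SIZE	64		/* made-up indirection table size */

	struct demo_priv {
		u8  rss_key[40];
		u32 rss_indir[DEMO_RETA_SIZE];
	};

	static int demo_get_rxfh(struct net_device *netdev,
				 struct ethtool_rxfh_param *rxfh)
	{
		struct demo_priv *priv = netdev_priv(netdev);
		int i;

		rxfh->hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz, always reported */

		if (rxfh->key)			/* filled only when requested */
			memcpy(rxfh->key, priv->rss_key, sizeof(priv->rss_key));

		if (rxfh->indir)
			for (i = 0; i < DEMO_RETA_SIZE; i++)
				rxfh->indir[i] = priv->rss_indir[i];

		return 0;
	}
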
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index 5182fe737c37..ff54fbe41bcc 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
module_platform_driver(liteeth_driver);
MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 89f26402f8fb..9190eff6c0bb 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -52,17 +53,19 @@
#define MVMDIO_XSMI_BUSY BIT(30)
#define MVMDIO_XSMI_ADDR_REG 0x8
+#define MVMDIO_XSMI_CFG_REG 0xc
+#define MVMDIO_XSMI_CLKDIV_MASK 0x3
+#define MVMDIO_XSMI_CLKDIV_256 0x0
+#define MVMDIO_XSMI_CLKDIV_64 0x1
+#define MVMDIO_XSMI_CLKDIV_32 0x2
+#define MVMDIO_XSMI_CLKDIV_8 0x3
+
/*
* SMI Timeout measurements:
* - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
* - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
*/
#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
-#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
-#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
-
-#define MVMDIO_XSMI_POLL_INTERVAL_MIN 150
-#define MVMDIO_XSMI_POLL_INTERVAL_MAX 160
struct orion_mdio_dev {
void __iomem *regs;
@@ -84,8 +87,6 @@ enum orion_mdio_bus_type {
struct orion_mdio_ops {
int (*is_done)(struct orion_mdio_dev *);
- unsigned int poll_interval_min;
- unsigned int poll_interval_max;
};
/* Wait for the SMI unit to be ready for another operation
@@ -94,34 +95,23 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
- unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
- unsigned long end = jiffies + timeout;
- int timedout = 0;
+ unsigned long timeout;
+ int done;
- while (1) {
- if (ops->is_done(dev))
+ if (dev->err_interrupt <= 0) {
+ if (!read_poll_timeout_atomic(ops->is_done, done, done, 2,
+ MVMDIO_SMI_TIMEOUT, false, dev))
+ return 0;
+ } else {
+ /* wait_event_timeout does not guarantee a delay of at
+ * least one whole jiffie, so timeout must be no less
+ * than two.
+ */
+ timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2);
+
+ if (wait_event_timeout(dev->smi_busy_wait,
+ ops->is_done(dev), timeout))
return 0;
- else if (timedout)
- break;
-
- if (dev->err_interrupt <= 0) {
- usleep_range(ops->poll_interval_min,
- ops->poll_interval_max);
-
- if (time_is_before_jiffies(end))
- ++timedout;
- } else {
- /* wait_event_timeout does not guarantee a delay of at
- * least one whole jiffie, so timeout must be no less
- * than two.
- */
- if (timeout < 2)
- timeout = 2;
- wait_event_timeout(dev->smi_busy_wait,
- ops->is_done(dev), timeout);
-
- ++timedout;
- }
}
dev_err(bus->parent, "Timeout: SMI busy for too long\n");
@@ -135,8 +125,6 @@ static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
static const struct orion_mdio_ops orion_mdio_smi_ops = {
.is_done = orion_mdio_smi_is_done,
- .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN,
- .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX,
};
static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
@@ -194,8 +182,6 @@ static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev)
static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
.is_done = orion_mdio_xsmi_is_done,
- .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN,
- .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
};
static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id,
@@ -246,6 +232,40 @@ static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id,
return 0;
}
+static void orion_mdio_xsmi_set_mdc_freq(struct mii_bus *bus)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ struct clk *mg_core;
+ u32 div, freq, cfg;
+
+ if (device_property_read_u32(bus->parent, "clock-frequency", &freq))
+ return;
+
+ mg_core = of_clk_get_by_name(bus->parent->of_node, "mg_core_clk");
+ if (IS_ERR(mg_core)) {
+ dev_err(bus->parent,
+ "MG core clock unknown, not changing MDC frequency");
+ return;
+ }
+
+ div = clk_get_rate(mg_core) / (freq + 1) + 1;
+ clk_put(mg_core);
+
+ if (div <= 8)
+ div = MVMDIO_XSMI_CLKDIV_8;
+ else if (div <= 32)
+ div = MVMDIO_XSMI_CLKDIV_32;
+ else if (div <= 64)
+ div = MVMDIO_XSMI_CLKDIV_64;
+ else
+ div = MVMDIO_XSMI_CLKDIV_256;
+
+ cfg = readl(dev->regs + MVMDIO_XSMI_CFG_REG);
+ cfg &= ~MVMDIO_XSMI_CLKDIV_MASK;
+ cfg |= div;
+ writel(cfg, dev->regs + MVMDIO_XSMI_CFG_REG);
+}
+
static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
{
struct orion_mdio_dev *dev = dev_id;
@@ -324,6 +344,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev_warn(&pdev->dev,
"unsupported number of clocks, limiting to the first "
__stringify(ARRAY_SIZE(dev->clk)) "\n");
+
+ if (type == BUS_TYPE_XSMI)
+ orion_mdio_xsmi_set_mdc_freq(bus);
} else {
dev->clk[0] = clk_get(&pdev->dev, NULL);
if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
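
The mvmdio rework above replaces the open-coded busy-wait with read_poll_timeout_atomic() from <linux/iopoll.h>, which repeatedly calls an accessor until a condition holds or a timeout expires and returns 0 or -ETIMEDOUT. A sketch of the same pattern on a hypothetical device; DEMO_STATUS, DEMO_DONE and the demo_* names exist only for this example:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/iopoll.h>

	#define DEMO_STATUS	0x04		/* made-up status register offset */
	#define DEMO_DONE	BIT(0)		/* made-up "operation done" flag  */

	struct demo_dev {
		void __iomem *regs;
	};

	static int demo_is_done(struct demo_dev *dev)
	{
		return readl(dev->regs + DEMO_STATUS) & DEMO_DONE;
	}

	static int demo_wait_ready(struct demo_dev *dev)
	{
		int done;

		/* Poll demo_is_done(dev) every 2us for at most 1000us in
		 * total; 'done' holds the last value read, and the whole
		 * expression is 0 on success or -ETIMEDOUT on timeout.
		 */
		return read_poll_timeout_atomic(demo_is_done, done, done,
						2, 1000, false, dev);
	}
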
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 29aac327574d..a641b3534ca3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5030,8 +5030,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
return 0;
}
-static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int mvneta_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -5042,20 +5043,21 @@ static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed
* and no change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
+ memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE);
return mvneta_config_rss(pp);
}
-static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int mvneta_ethtool_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -5063,13 +5065,12 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
if (pp->neta_armada3700)
return -EOPNOTSUPP;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
+ memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 93137606869e..23adf53c2aa1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}
+/* Clean up pool before actual initialization in the OS */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+ u32 val;
+ int i;
+
+ /* Drain the BM from all possible residues left by firmware */
+ for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+ mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+ put_cpu();
+
+ /* Stop the BM pool */
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+ val |= MVPP2_BM_STOP_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
+}
+
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
int i, err, poolnum = MVPP2_BM_POOLS_NUM;
struct mvpp2_port *port;
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+ /* Clean up the pool state in case it contains stale state */
+ for (i = 0; i < poolnum; i++)
+ mvpp2_bm_pool_cleanup(priv, i);
+
if (priv->percpu_pools) {
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
}
}
- poolnum = mvpp2_get_nrxqs(priv) * 2;
for (i = 0; i < poolnum; i++) {
/* the pool in use */
int pn = i / (poolnum / 2);
@@ -1513,10 +1538,21 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
- if (port->gop_id == 2)
+ if (port->gop_id == 2) {
val |= GENCONF_CTRL0_PORT2_RGMII;
- else if (port->gop_id == 3)
+ } else if (port->gop_id == 3) {
val |= GENCONF_CTRL0_PORT3_RGMII_MII;
+
+ /* According to the specification, GENCONF_CTRL0_PORT3_RGMII
+ * should be set to 1 for RGMII and 0 for MII. However, tests
+ * show that it is the other way around. This is also what
+ * U-Boot does for mvpp2, so it is assumed to be correct.
+ */
+ if (port->phy_interface == PHY_INTERFACE_MODE_MII)
+ val |= GENCONF_CTRL0_PORT3_RGMII;
+ else
+ val &= ~GENCONF_CTRL0_PORT3_RGMII;
+ }
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
@@ -1615,6 +1651,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
return 0;
switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -5634,49 +5671,11 @@ static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
-static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
-
- if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
-
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_CRC32;
-
- return ret;
-}
-
-static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
-
- if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
-
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
- return -EOPNOTSUPP;
-
- if (key)
- return -EOPNOTSUPP;
-
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
-
- return ret;
-}
-
-static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mvpp2_port *port = netdev_priv(dev);
+ u32 rss_context = rxfh->rss_context;
int ret = 0;
if (!mvpp22_rss_is_supported(port))
@@ -5684,33 +5683,34 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
if (rss_context >= MVPP22_N_RSS_TABLES)
return -EINVAL;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_CRC32;
+ rxfh->hfunc = ETH_RSS_HASH_CRC32;
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rss_context,
+ rxfh->indir);
return ret;
}
-static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret;
+ u32 *rss_context = &rxfh->rss_context;
+ int ret = 0;
if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_CRC32)
return -EOPNOTSUPP;
- if (key)
+ if (rxfh->key)
return -EOPNOTSUPP;
- if (delete)
+ if (*rss_context && rxfh->rss_delete)
return mvpp22_port_rss_ctx_delete(port, *rss_context);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
@@ -5719,8 +5719,13 @@ static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
return ret;
}
- return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
+ rxfh->indir);
+
+ return ret;
}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5740,6 +5745,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
@@ -5762,8 +5768,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
- .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
- .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -6898,7 +6902,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->min_mtu = ETH_MIN_MTU;
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
- dev->dev.of_node = port_node;
+ device_set_node(&dev->dev, port_fwnode);
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
port->pcs_gmac.neg_mode = true;
@@ -6948,8 +6952,11 @@ static int mvpp2_port_probe(struct platform_device *pdev,
MAC_10000FD;
}
- if (mvpp2_port_supports_rgmii(port))
+ if (mvpp2_port_supports_rgmii(port)) {
phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ port->phylink_config.supported_interfaces);
+ }
if (comphy) {
/* If a COMPHY is present, we can support any of the
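
The mvpp2 conversion also folds the old .get_rxfh_context/.set_rxfh_context callbacks into the regular handlers: a custom context is selected through rxfh->rss_context, created when userspace passes ETH_RXFH_CONTEXT_ALLOC, and destroyed via rxfh->rss_delete, with support advertised by cap_rss_ctx_supported. A hedged sketch of that control flow for a hypothetical driver; the demo_* helpers are assumptions, not mvpp2 functions:

	/* Hypothetical unified set_rxfh handling custom RSS contexts */
	static int demo_set_rxfh(struct net_device *dev,
				 struct ethtool_rxfh_param *rxfh,
				 struct netlink_ext_ack *extack)
	{
		struct demo_priv *priv = netdev_priv(dev);
		u32 *ctx = &rxfh->rss_context;
		int ret;

		if (*ctx && rxfh->rss_delete)		/* destroy an existing context */
			return demo_rss_ctx_delete(priv, *ctx);

		if (*ctx == ETH_RXFH_CONTEXT_ALLOC) {	/* allocate a new context id */
			ret = demo_rss_ctx_create(priv, ctx);
			if (ret)
				return ret;
		}

		if (rxfh->indir)			/* program the chosen context */
			return demo_rss_ctx_indir_set(priv, *ctx, rxfh->indir);

		return 0;
	}
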
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
index 2026c8118158..62162ed63f34 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/Makefile
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
- octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o \
+ octep_pfvf_mbox.o octep_cnxk_pf.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index d4ee2454675b..b5805969404f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -216,16 +216,21 @@ static void octep_init_config_cn93_pf(struct octep_device *oct)
conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
- conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
- conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
- conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ if (oct->chip_id == OCTEP_PCI_DEVICE_ID_CN98_PF) {
+ conf->pf_ring_cfg.srn = CN98_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN98_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ } else {
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ }
dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
conf->iq.instr_type = OCTEP_64BYTE_INSTR;
- conf->iq.pkind = 0;
conf->iq.db_min = OCTEP_DB_MIN;
conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
@@ -357,16 +362,55 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
{
struct octep_mbox *mbox = oct->mbox[q_no];
- mbox->q_no = q_no;
-
- /* PF mbox interrupt reg */
- mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
-
/* PF to VF DATA reg. PF writes into this reg */
- mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+ mbox->pf_vf_data_reg = oct->mmio[0].hw_addr + CN93_SDP_MBOX_PF_VF_DATA(q_no);
/* VF to PF DATA reg. PF reads from this reg */
- mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+ mbox->vf_pf_data_reg = oct->mmio[0].hw_addr + CN93_SDP_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Poll for mailbox messages from VF */
+static void octep_poll_pfvf_mailbox(struct octep_device *oct)
+{
+ u32 vf, active_vfs, active_rings_per_vf, vf_mbox_queue;
+ u64 reg0, reg1;
+
+ reg0 = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0));
+ reg1 = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(1));
+ if (reg0 || reg1) {
+ active_vfs = CFG_GET_ACTIVE_VFS(oct->conf);
+ active_rings_per_vf = CFG_GET_ACTIVE_RPVF(oct->conf);
+ for (vf = 0; vf < active_vfs; vf++) {
+ vf_mbox_queue = vf * active_rings_per_vf;
+
+ if (vf_mbox_queue < 64) {
+ if (!(reg0 & (0x1UL << vf_mbox_queue)))
+ continue;
+ } else {
+ if (!(reg1 & (0x1UL << (vf_mbox_queue - 64))))
+ continue;
+ }
+
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "bad mbox vf %d\n", vf);
+ continue;
+ }
+ schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
+ }
+ if (reg0)
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0), reg0);
+ if (reg1)
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT(1), reg1);
+ }
+}
+
+/* PF-VF mailbox interrupt handler */
+static irqreturn_t octep_pfvf_mbox_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_pfvf_mailbox(oct);
+ return IRQ_HANDLED;
}
/* Poll OEI events like heartbeat */
@@ -398,6 +442,7 @@ static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev)
*/
static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
{
+ octep_poll_pfvf_mailbox(oct);
octep_poll_oei_cn93_pf(oct);
}
@@ -578,6 +623,13 @@ static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
return IRQ_HANDLED;
}
+/* soft reset of 98xx */
+static int octep_soft_reset_cn98_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN98XX: skip soft reset\n");
+ return 0;
+}
+
/* soft reset of 93xx */
static int octep_soft_reset_cn93_pf(struct octep_device *oct)
{
@@ -634,6 +686,8 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1S(1), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
@@ -660,6 +714,8 @@ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1C(1), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
@@ -795,6 +851,7 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+ oct->hw_ops.mbox_intr_handler = octep_pfvf_mbox_intr_handler_cn93_pf;
oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf;
oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf;
oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf;
@@ -806,7 +863,10 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf;
oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf;
oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
- oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ if (oct->chip_id == OCTEP_PCI_DEVICE_ID_CN98_PF)
+ oct->hw_ops.soft_reset = octep_soft_reset_cn98_pf;
+ else
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
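
The new PF-VF mailbox poll maps each VF to the interrupt bit of its first ring: the bit index is vf * active_rings_per_vf, with bits 0-63 reported in MBOX_RINT(0) and bits 64-127 in MBOX_RINT(1). A small standalone sketch of that lookup, assuming the two raw register words have already been read; the names here are local to the example:

	#include <stdbool.h>
	#include <stdint.h>

	/* Return true if VF 'vf' has a pending mailbox bit, given the two
	 * 64-bit RINT words and the number of rings each VF owns.  Mirrors
	 * the lookup done in octep_poll_pfvf_mailbox().
	 */
	static bool vf_mbox_pending(uint64_t rint0, uint64_t rint1,
				    unsigned int vf, unsigned int rings_per_vf)
	{
		unsigned int bit = vf * rings_per_vf;	/* VF's first ring index */

		if (bit < 64)
			return rint0 & (1ULL << bit);
		return rint1 & (1ULL << (bit - 64));
	}
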
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
new file mode 100644
index 000000000000..5de0b5ecbc5f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cnxk_pf.h"
+
+/* We will support 128 PFs in the control mbox */
+#define CTRL_MBOX_MAX_PF 128
+#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cnxk_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint",
+ "epf_rsvd0",
+ "epf_vfore_rint",
+ "epf_rsvd1",
+ "epf_mbox_rint",
+ "epf_rsvd2_0",
+ "epf_rsvd2_1",
+ "epf_dma_rint",
+ "epf_dma_vf_rint",
+ "epf_rsvd3",
+ "epf_pp_vf_rint",
+ "epf_rsvd3",
+ "epf_misc_rint",
+ "epf_rsvd5",
+ /* Next 16 are for OEI_RINT */
+ "epf_oei_rint0",
+ "epf_oei_rint1",
+ "epf_oei_rint2",
+ "epf_oei_rint3",
+ "epf_oei_rint4",
+ "epf_oei_rint5",
+ "epf_oei_rint6",
+ "epf_oei_rint7",
+ "epf_oei_rint8",
+ "epf_oei_rint9",
+ "epf_oei_rint10",
+ "epf_oei_rint11",
+ "epf_oei_rint12",
+ "epf_oei_rint13",
+ "epf_oei_rint14",
+ "epf_oei_rint15",
+ /* IOQ interrupt */
+ "octeon_ep"
+};
+
+/* Dump useful hardware CSRs for debug purpose */
+static void cnxk_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cnxk_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cnxk_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CNXK PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cnxk_reset_iq(oct, q);
+ cnxk_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cnxk_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cnxk_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CNXK_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CNXK_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_init_config_cnxk_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u8 link = 0;
+ u64 val;
+ int pos;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CNXK_SDP_EPF_RINFO);
+ dev_info(&pdev->dev, "SDP_EPF_RINFO[0x%x]:0x%llx\n", CNXK_SDP_EPF_RINFO, val);
+ conf->sriov_cfg.max_rings_per_vf = CNXK_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CNXK_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CNXK_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CNXK_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ dev_info(&pdev->dev, "SDP_MAC_PF_RING_CTL[%d]:0x%llx\n", oct->pcie_port, val);
+ conf->pf_ring_cfg.srn = CNXK_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CNXK_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+ conf->oq.wmark = OCTEP_OQ_WMARK_MIN;
+
+ conf->msix_cfg.non_ioq_msix = CNXK_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cnxk_non_ioq_msix_names;
+
+ pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_byte(oct->pdev,
+ pos + PCI_SRIOV_FUNC_LINK,
+ &link);
+ link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
+ }
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
+ CNXK_PEM_BAR4_INDEX_OFFSET +
+ (link * CTRL_MBOX_SZ);
+
+ conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL;
+ conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to be set to 1 */
+ if (!(reg_val & CNXK_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CNXK_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CNXK_R_IN_CTL_RDSIZE;
+ reg_val |= CNXK_R_IN_CTL_IS_64B;
+ reg_val |= CNXK_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to be set to 1 */
+ if (!(reg_val & CNXK_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CNXK_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CNXK_R_OUT_CTL_IMODE);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_ES_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CNXK_R_OUT_CTL_ES_D);
+ reg_val |= (CNXK_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~0x7fffffULL;
+
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & 0xffff);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CNXK_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ /* set watermark for backpressure */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no));
+ reg_val &= ~0xFFFFFFFFULL;
+ reg_val |= CFG_GET_OQ_WMARK(oct->conf);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cnxk_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->pf_vf_data_reg = oct->mmio[0].hw_addr + CNXK_SDP_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->vf_pf_data_reg = oct->mmio[0].hw_addr + CNXK_SDP_MBOX_VF_PF_DATA(q_no);
+}
+
+static void octep_poll_pfvf_mailbox_cnxk_pf(struct octep_device *oct)
+{
+ u32 vf, active_vfs, active_rings_per_vf, vf_mbox_queue;
+ u64 reg0;
+
+ reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_MBOX_RINT(0));
+ if (reg0) {
+ active_vfs = CFG_GET_ACTIVE_VFS(oct->conf);
+ active_rings_per_vf = CFG_GET_ACTIVE_RPVF(oct->conf);
+ for (vf = 0; vf < active_vfs; vf++) {
+ vf_mbox_queue = vf * active_rings_per_vf;
+ if (!(reg0 & (0x1UL << vf_mbox_queue)))
+ continue;
+
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "bad mbox vf %d\n", vf);
+ continue;
+ }
+ schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
+ }
+ if (reg0)
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT(0), reg0);
+ }
+}
+
+static irqreturn_t octep_pfvf_mbox_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_pfvf_mailbox_cnxk_pf(oct);
+ return IRQ_HANDLED;
+}
+
+/* Poll OEI events like heartbeat */
+static void octep_poll_oei_cnxk_pf(struct octep_device *oct)
+{
+ u64 reg0;
+
+ /* Check for OEI INTR */
+ reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_OEI_RINT);
+ if (reg0) {
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT, reg0);
+ if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ atomic_set(&oct->hb_miss_cnt, 0);
+ }
+}
+
+/* OEI interrupt handler */
+static irqreturn_t octep_oei_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_oei_cnxk_pf(oct);
+ return IRQ_HANDLED;
+}
+
+/* Process non-ioq interrupts required to keep pf interface running.
+ * OEI_RINT is needed for the control mailbox.
+ * MBOX_RINT is needed for the PF-VF mailbox.
+ */
+static void octep_poll_non_ioq_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ octep_poll_pfvf_mailbox_cnxk_pf(oct);
+ octep_poll_oei_cnxk_pf(oct);
+}
+
+/* Interrupt handler for input ring error interrupts. */
+static irqreturn_t octep_ire_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CNXK_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for output ring error interrupts. */
+static irqreturn_t octep_ore_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf input ring error interrupts. */
+static irqreturn_t octep_vfire_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf output ring error interrupts. */
+static irqreturn_t octep_vfore_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma related interrupts. */
+static irqreturn_t octep_dma_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ u64 reg_val = 0;
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_RINT);
+ if (reg_val)
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT, reg_val);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma transaction error interrupts for VFs */
+static irqreturn_t octep_dma_vf_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for pp transaction error interrupts for VFs */
+static irqreturn_t octep_pp_vf_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for mac related interrupts. */
+static irqreturn_t octep_misc_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT, reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for all reserved interrupts. */
+static irqreturn_t octep_rsvd_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+
+ dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cnxk_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset */
+static int octep_soft_reset_cnxk_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CNXKXX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to a hw bug, it is not.
+ * Set it to RUNNING right before reset so that it is not
+ * left in READY (1) state after a reset. This is required
+ * in addition to the early setting to handle the case where
+ * the OcteonTX is unexpectedly reset, reboots, and then
+ * the module is removed.
+ */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
+ FW_STATUS_RUNNING);
+
+ /* Set chip domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_RST_CHIP_DOMAIN_W1S, 1);
+ /* Wait till Octeon resets. */
+ mdelay(10);
+ /* restore the reset value */
+ octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cnxk_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
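+ /* Re-credit every Rx ring with its full buffer count. */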
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT_ENA_W1S(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
+}
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT_ENA_W1C(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_update_iq_read_index_cnxk_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
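+ /* inst_cnt_reg is a free-running 32-bit count; the unsigned
+ * subtraction above yields the instructions completed since the
+ * last poll, even across counter wrap-around.
+ */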
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
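+ /* Writing all-ones above is used to clear any stale doorbell count;
+ * poll (bounded by roughly HZ iterations) until it reads back zero
+ * before enabling the ring.
+ */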
+ while (octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cnxk_pf(oct, q);
+ octep_enable_oq_cnxk_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cnxk_pf(oct, q);
+ octep_disable_oq_cnxk_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cnxk_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cnxk_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cnxk_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cnxk_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cnxk_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cnxk_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cnxk_pf;
+
+ oct->hw_ops.mbox_intr_handler = octep_pfvf_mbox_intr_handler_cnxk_pf;
+ oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cnxk_pf;
+ oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cnxk_pf;
+ oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cnxk_pf;
+ oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cnxk_pf;
+ oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cnxk_pf;
+ oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cnxk_pf;
+ oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cnxk_pf;
+ oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cnxk_pf;
+ oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cnxk_pf;
+ oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cnxk_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cnxk_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cnxk_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cnxk_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cnxk_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cnxk_pf;
+ oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cnxk_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cnxk_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cnxk_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cnxk_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cnxk_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cnxk_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cnxk_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cnxk_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cnxk_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cnxk_pf;
+
+ octep_setup_pci_window_regs_cnxk_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CNXK_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cnxk_pf(oct);
+ octep_configure_ring_mapping_cnxk_pf(oct);
+
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to IPBUPEM-38842, it is not.
+ * Set it to RUNNING early in boot, so that unexpected resets
+ * leave it in a state that is not READY (1).
+ */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
+ FW_STATUS_RUNNING);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
index 1622a6ebf036..1627660175c2 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -13,12 +13,16 @@
#define OCTEP_64BYTE_INSTR 64
/* Tx Queue: maximum descriptors per ring */
+/* This needs to be a power of 2 */
#define OCTEP_IQ_MAX_DESCRIPTORS 1024
/* Minimum input (Tx) requests to be enqueued to ring doorbell */
-#define OCTEP_DB_MIN 1
+#define OCTEP_DB_MIN 8
/* Packet threshold for Tx queue interrupt */
#define OCTEP_IQ_INTR_THRESHOLD 0x0
+/* Minimum watermark for backpressure */
+#define OCTEP_OQ_WMARK_MIN 256
+
/* Rx Queue: maximum descriptors per ring */
#define OCTEP_OQ_MAX_DESCRIPTORS 1024
@@ -44,8 +48,6 @@
/* Minimum MTU supported by Octeon network interface */
#define OCTEP_MIN_MTU ETH_MIN_MTU
-/* Maximum MTU supported by Octeon interface*/
-#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
/* Default MTU */
#define OCTEP_DEFAULT_MTU 1500
@@ -58,7 +60,6 @@
#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
-#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
@@ -68,12 +69,12 @@
#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
-#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
@@ -97,9 +98,6 @@ struct octep_iq_config {
/* Command size - 32 or 64 bytes */
u16 instr_type;
- /* pkind for packets sent to Octeon */
- u16 pkind;
-
/* Minimum number of commands pending to be posted to Octeon before driver
* hits the Input queue doorbell.
*/
@@ -137,6 +135,12 @@ struct octep_oq_config {
* default. The time is specified in microseconds.
*/
u32 oq_intr_time;
+
+ /* Water mark for backpressure.
+ * Output queue sends backpressure signal to source when
+ * free buffer count falls below wmark.
+ */
+ u32 wmark;
};
/* Tx/Rx configuration */
@@ -189,11 +193,37 @@ struct octep_ctrl_mbox_config {
/* Info from firmware */
struct octep_fw_info {
/* interface pkind */
- u16 pkind;
+ u8 pkind;
+
+ /* front size data */
+ u8 fsz;
+
/* heartbeat interval in milliseconds */
u16 hb_interval;
+
/* heartbeat miss count */
u16 hb_miss_count;
+
+ /* reserved */
+ u16 reserved1;
+
+ /* supported rx offloads OCTEP_ETH_RX_OFFLOAD_* */
+ u16 rx_ol_flags;
+
+ /* supported tx offloads OCTEP_ETH_TX_OFFLOAD_* */
+ u16 tx_ol_flags;
+
+ /* reserved */
+ u32 reserved_offloads;
+
+ /* extra offload flags */
+ u64 ext_ol_flags;
+
+ /* supported features */
+ u64 features[2];
+
+ /* reserved */
+ u64 reserved2[3];
};
/* Data Structure to hold configuration limits and active config */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
index 7f8135788efc..6da32d40f926 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -16,10 +16,12 @@
* |reserved (4 bytes) |
* |-------------------------------------------|
* |host version (8 bytes) |
+ * | low 32 bits |
* |host status (8 bytes) |
* |host reserved (104 bytes) |
* |-------------------------------------------|
- * |fw version (8 bytes) |
+ * |fw versions (8 bytes) |
+ * | min=high 32 bits, max=low 32 bits |
* |fw status (8 bytes) |
* |fw reserved (104 bytes) |
* |===========================================|
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 0594607a2585..01b7be154c38 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -13,6 +13,7 @@
#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
+#include "octep_pfvf_mbox.h"
/* Control plane version */
#define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0)
@@ -22,12 +23,15 @@ static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu);
static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac);
static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state);
static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info);
+static const u32 offloads_sz = sizeof(struct octep_ctrl_net_offloads);
static atomic_t ctrl_net_msg_id;
/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
- [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_GET_INFO] =
- OCTEP_CP_VERSION(1, 0, 0)
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE] =
+ OCTEP_CP_VERSION(1, 0, 0),
+ [OCTEP_CTRL_NET_H2F_CMD_OFFLOADS] = OCTEP_CP_VERSION(1, 0, 1)
+
};
/* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */
@@ -122,7 +126,7 @@ int octep_ctrl_net_init(struct octep_device *oct)
int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
int err;
@@ -139,7 +143,7 @@ int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, state_sz, vfid);
@@ -154,7 +158,7 @@ int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, state_sz, vfid);
@@ -168,7 +172,7 @@ int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
int err;
@@ -187,7 +191,7 @@ int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, mac_sz, vfid);
@@ -198,10 +202,28 @@ int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
return octep_send_mbox_req(oct, &d, wait_for_response);
}
+int octep_ctrl_net_get_mtu(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+ int err;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, mtu_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req->mtu.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ return d.data.resp.mtu.val;
+}
+
int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, mtu_sz, vfid);
@@ -216,7 +238,7 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
struct octep_iface_rx_stats *rx_stats,
struct octep_iface_tx_stats *tx_stats)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
struct octep_ctrl_net_h2f_resp *resp;
int err;
@@ -236,7 +258,7 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
struct octep_ctrl_net_h2f_resp *resp;
int err;
@@ -262,7 +284,7 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, link_info_sz, vfid);
@@ -308,6 +330,11 @@ static int process_mbox_notify(struct octep_device *oct,
octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT)
return -EOPNOTSUPP;
+ if (msg->hdr.s.is_vf) {
+ octep_pfvf_notify(oct, msg);
+ return 0;
+ }
+
switch (cmd) {
case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
if (netif_running(netdev)) {
@@ -331,8 +358,8 @@ static int process_mbox_notify(struct octep_device *oct,
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
{
static u16 msg_sz = sizeof(union octep_ctrl_net_max_data);
- union octep_ctrl_net_max_data data = {0};
- struct octep_ctrl_mbox_msg msg = {0};
+ union octep_ctrl_net_max_data data = {};
+ struct octep_ctrl_mbox_msg msg = {};
int ret;
msg.hdr.s.sz = msg_sz;
@@ -356,7 +383,7 @@ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
struct octep_fw_info *info)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_resp *resp;
struct octep_ctrl_net_h2f_req *req;
int err;
@@ -375,10 +402,41 @@ int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
return 0;
}
+int octep_ctrl_net_dev_remove(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+
+ req = &d.data.req;
+ dev_dbg(&oct->pdev->dev, "Sending dev_unload msg to fw\n");
+ init_send_req(&d.msg, req, sizeof(int), vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE;
+
+ return octep_send_mbox_req(oct, &d, false);
+}
+
+int octep_ctrl_net_set_offloads(struct octep_device *oct, int vfid,
+ struct octep_ctrl_net_offloads *offloads,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, offloads_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_OFFLOADS;
+ req->offloads.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->offloads.offloads = *offloads;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
int octep_ctrl_net_uninit(struct octep_device *oct)
{
struct octep_ctrl_net_wait_data *pos, *n;
+ octep_ctrl_net_dev_remove(oct, OCTEP_CTRL_NET_INVALID_VFID);
+
list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list)
pos->done = 1;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index b330f370131b..0b823bea9cd8 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -42,6 +42,8 @@ enum octep_ctrl_net_h2f_cmd {
OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
OCTEP_CTRL_NET_H2F_CMD_GET_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE,
+ OCTEP_CTRL_NET_H2F_CMD_OFFLOADS,
OCTEP_CTRL_NET_H2F_CMD_MAX
};
@@ -112,6 +114,26 @@ struct octep_ctrl_net_h2f_req_cmd_link_info {
struct octep_ctrl_net_link_info info;
};
+/* offloads */
+struct octep_ctrl_net_offloads {
+ /* supported rx offloads OCTEP_RX_OFFLOAD_* */
+ u16 rx_offloads;
+ /* supported tx offloads OCTEP_TX_OFFLOAD_* */
+ u16 tx_offloads;
+ /* reserved */
+ u32 reserved_offloads;
+ /* extra offloads */
+ u64 ext_offloads;
+};
+
+/* get/set offloads */
+struct octep_ctrl_net_h2f_req_cmd_offloads {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_offloads */
+ struct octep_ctrl_net_offloads offloads;
+};
+
/* Host to fw request data */
struct octep_ctrl_net_h2f_req {
union octep_ctrl_net_req_hdr hdr;
@@ -121,6 +143,7 @@ struct octep_ctrl_net_h2f_req {
struct octep_ctrl_net_h2f_req_cmd_state link;
struct octep_ctrl_net_h2f_req_cmd_state rx;
struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ struct octep_ctrl_net_h2f_req_cmd_offloads offloads;
};
} __packed;
@@ -178,6 +201,7 @@ struct octep_ctrl_net_h2f_resp {
struct octep_ctrl_net_h2f_resp_cmd_state rx;
struct octep_ctrl_net_link_info link_info;
struct octep_ctrl_net_h2f_resp_cmd_get_info info;
+ struct octep_ctrl_net_offloads offloads;
};
} __packed;
@@ -218,87 +242,105 @@ struct octep_ctrl_net_wait_data {
} data;
};
-/** Initialize data for ctrl net.
+/**
+ * octep_ctrl_net_init() - Initialize data for ctrl net.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*
* return value: 0 on success, -errno on error.
*/
int octep_ctrl_net_init(struct octep_device *oct);
-/** Get link status from firmware.
+/**
+ * octep_ctrl_net_get_link_status() - Get link status from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
*
* return value: link status 0=down, 1=up.
*/
int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid);
-/** Set link status in firmware.
+/**
+ * octep_ctrl_net_set_link_status() - Set link status in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param up: boolean status.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @up: boolean status.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure
*/
int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
bool wait_for_response);
-/** Set rx state in firmware.
+/**
+ * octep_ctrl_net_set_rx_state() - Set rx state in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param up: boolean status.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @up: boolean status.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
bool wait_for_response);
-/** Get mac address from firmware.
+/**
+ * octep_ctrl_net_get_mac_addr() - Get mac address from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param addr: non-null pointer to mac address.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @addr: non-null pointer to mac address.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr);
-/** Set mac address in firmware.
+/**
+ * octep_ctrl_net_set_mac_addr() - Set mac address in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param addr: non-null pointer to mac address.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @addr: non-null pointer to mac address.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
bool wait_for_response);
-/** Set mtu in firmware.
+/**
+ * octep_ctrl_net_get_mtu() - Get max MTU from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param mtu: mtu.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ *
+ * return value: mtu on success, -errno on failure.
+ */
+int octep_ctrl_net_get_mtu(struct octep_device *oct, int vfid);
+
+/**
+ * octep_ctrl_net_set_mtu() - Set mtu in firmware.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @mtu: mtu.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
bool wait_for_response);
-/** Get interface statistics from firmware.
+/**
+ * octep_ctrl_net_get_if_stats() - Get interface statistics from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param rx_stats: non-null pointer struct octep_iface_rx_stats.
- * @param tx_stats: non-null pointer struct octep_iface_tx_stats.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @rx_stats: non-null pointer struct octep_iface_rx_stats.
+ * @tx_stats: non-null pointer struct octep_iface_tx_stats.
*
* return value: 0 on success, -errno on failure.
*/
@@ -306,23 +348,25 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
struct octep_iface_rx_stats *rx_stats,
struct octep_iface_tx_stats *tx_stats);
-/** Get link info from firmware.
+/**
+ * octep_ctrl_net_get_link_info() - Get link info from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param link_info: non-null pointer to struct octep_iface_link_info.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @link_info: non-null pointer to struct octep_iface_link_info.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info);
-/** Set link info in firmware.
+/**
+ * octep_ctrl_net_set_link_info() - Set link info in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param link_info: non-null pointer to struct octep_iface_link_info.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @link_info: non-null pointer to struct octep_iface_link_info.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
@@ -331,26 +375,53 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct,
struct octep_iface_link_info *link_info,
bool wait_for_response);
-/** Poll for firmware messages and process them.
+/**
+ * octep_ctrl_net_recv_fw_messages() - Poll for firmware messages and process them.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*/
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
-/** Get info from firmware.
+/**
+ * octep_ctrl_net_get_info() - Get info from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param info: non-null pointer to struct octep_fw_info.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @info: non-null pointer to struct octep_fw_info.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
struct octep_fw_info *info);
-/** Uninitialize data for ctrl net.
+/**
+ * octep_ctrl_net_dev_remove() - Indicate to firmware that a device unload has happened.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_dev_remove(struct octep_device *oct, int vfid);
+
+/**
+ * octep_ctrl_net_set_offloads() - Set offloads in firmware.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @offloads: non-null pointer to struct octep_ctrl_net_offloads.
+ * @wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_offloads(struct octep_device *oct, int vfid,
+ struct octep_ctrl_net_offloads *offloads,
+ bool wait_for_response);
+
+/**
+ * octep_ctrl_net_uninit() - Uninitialize data for ctrl net.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*
* return value: 0 on success, -errno on error.
*/
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index a9bdf3283a85..7c9faa714a10 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -16,14 +16,20 @@
#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
+#include "octep_pfvf_mbox.h"
#define OCTEP_INTR_POLL_TIME_MSECS 100
struct workqueue_struct *octep_wq;
/* Supported Devices */
static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_PF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_PF)},
{0, },
};
MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
@@ -155,6 +161,21 @@ static void octep_disable_msix(struct octep_device *oct)
}
/**
+ * octep_mbox_intr_handler() - common handler for pfvf mbox interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for PF-VF mailbox interrupts; it dispatches
+ * to the hardware-specific mailbox handler.
+ */
+static irqreturn_t octep_mbox_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.mbox_intr_handler(oct);
+}
+
+/**
* octep_oei_intr_handler() - common handler for output endpoint interrupts.
*
* @irq: Interrupt number.
@@ -357,8 +378,12 @@ static int octep_request_irqs(struct octep_device *oct)
snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
"%s-%s", netdev->name, non_ioq_msix_names[i]);
- if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
- strlen("epf_oei_rint"))) {
+ if (!strncmp(non_ioq_msix_names[i], "epf_mbox_rint", strlen("epf_mbox_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_mbox_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
+ strlen("epf_oei_rint"))) {
ret = request_irq(msix_entry->vector,
octep_oei_intr_handler, 0,
irq_name, oct);
@@ -777,17 +802,24 @@ static int octep_stop(struct net_device *netdev)
*/
static inline int octep_iq_full_check(struct octep_iq *iq)
{
- if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ if (likely((IQ_INSTR_SPACE(iq)) >
OCTEP_WAKE_QUEUE_THRESHOLD))
return 0;
/* Stop the queue if unable to send */
netif_stop_subqueue(iq->netdev, iq->q_no);
+ /* Let pending queue-index updates made by octep_iq_process_completions()
+ * on other CPUs become visible, in case the queue has regained free
+ * entries.
+ */
+ smp_mb();
+
/* check again and restart the queue, in case NAPI has just freed
* enough Tx ring entries.
*/
- if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
iq->stats.restart_cnt++;
@@ -810,6 +842,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct octep_device *oct = netdev_priv(netdev);
+ netdev_features_t feat = netdev->features;
struct octep_tx_sglist_desc *sglist;
struct octep_tx_buffer *tx_buffer;
struct octep_tx_desc_hw *hw_desc;
@@ -818,8 +851,12 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
struct octep_iq *iq;
skb_frag_t *frag;
u16 nr_frags, si;
+ int xmit_more;
u16 q_no, wi;
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
q_no = skb_get_queue_mapping(skb);
if (q_no >= oct->num_iqs) {
netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
@@ -827,10 +864,6 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
}
iq = oct->iq[q_no];
- if (octep_iq_full_check(iq)) {
- iq->stats.tx_busy++;
- return NETDEV_TX_BUSY;
- }
shinfo = skb_shinfo(skb);
nr_frags = shinfo->nr_frags;
@@ -843,8 +876,9 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
tx_buffer->skb = skb;
ih = &hw_desc->ih;
- ih->tlen = skb->len;
- ih->pkind = oct->pkind;
+ ih->pkind = oct->conf->fw_info.pkind;
+ ih->fsz = oct->conf->fw_info.fsz;
+ ih->tlen = skb->len + ih->fsz;
if (!nr_frags) {
tx_buffer->gather = 0;
@@ -869,9 +903,6 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
if (dma_mapping_error(iq->dev, dma))
goto dma_map_err;
- dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
- OCTEP_SGLIST_SIZE_PER_PKT,
- DMA_TO_DEVICE);
memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
sglist[0].len[3] = len;
sglist[0].dma_ptr[0] = dma;
@@ -891,26 +922,46 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
frag++;
si++;
}
- dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
- OCTEP_SGLIST_SIZE_PER_PKT,
- DMA_TO_DEVICE);
-
hw_desc->dptr = tx_buffer->sglist_dma;
}
- netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ if (oct->conf->fw_info.tx_ol_flags) {
+ if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
+ hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
+ hw_desc->txm.ol_flags |= OCTEP_TX_OFFLOAD_TSO;
+ hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
+ hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
+ } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
+ }
+ /* due to ESR, the txm word is byte-swapped by hw; pre-swap it here */
+ hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ __netdev_tx_sent_queue(iq->netdev_q, skb->len, xmit_more);
+
skb_tx_timestamp(skb);
- atomic_inc(&iq->instr_pending);
+ iq->fill_cnt++;
wi++;
- if (wi == iq->max_count)
- wi = 0;
- iq->host_write_index = wi;
+ iq->host_write_index = wi & iq->ring_size_mask;
+
+ /* If inserting the current packet has filled the queue,
+ * octep_iq_full_check() stops it and returns true; in that case,
+ * ring the doorbell now instead of deferring it.
+ */
+ if (!octep_iq_full_check(iq) && xmit_more &&
+ iq->fill_cnt < iq->fill_threshold)
+ return NETDEV_TX_OK;
+
/* Flush the hw descriptor before writing to doorbell */
wmb();
-
- /* Ring Doorbell to notify the NIC there is a new packet */
- writel(1, iq->doorbell_reg);
- iq->stats.instr_posted++;
+ /* Ring Doorbell to notify the NIC of new packets */
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ iq->stats.instr_posted += iq->fill_cnt;
+ iq->fill_cnt = 0;
return NETDEV_TX_OK;
dma_map_sg_err:
@@ -1051,6 +1102,41 @@ static int octep_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
+static int octep_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct octep_ctrl_net_offloads offloads = { 0 };
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ /* We only support features received from firmware */
+ if ((features & dev->hw_features) != features)
+ return -EINVAL;
+
+ if (features & NETIF_F_TSO)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_TSO6)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_IP_CSUM)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_IPV6_CSUM)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_RXCSUM)
+ offloads.rx_offloads |= OCTEP_RX_OFFLOAD_CKSUM;
+
+ err = octep_ctrl_net_set_offloads(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ &offloads,
+ true);
+ if (!err)
+ dev->features = features;
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1059,6 +1145,7 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_tx_timeout = octep_tx_timeout,
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
+ .ndo_set_features = octep_set_features,
};
/**
@@ -1132,10 +1219,20 @@ static void octep_ctrl_mbox_task(struct work_struct *work)
static const char *octep_devid_to_str(struct octep_device *oct)
{
switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN98_PF:
+ return "CN98XX";
case OCTEP_PCI_DEVICE_ID_CN93_PF:
return "CN93XX";
case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
return "CNF95N";
+ case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
+ return "CN10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
+ return "CNF10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
+ return "CNF10KB";
+ case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
+ return "CN10KB";
default:
return "Unsupported";
}
@@ -1174,6 +1271,7 @@ int octep_device_setup(struct octep_device *oct)
dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN98_PF:
case OCTEP_PCI_DEVICE_ID_CN93_PF:
case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
@@ -1181,13 +1279,20 @@ int octep_device_setup(struct octep_device *oct)
OCTEP_MINOR_REV(oct));
octep_device_setup_cn93_pf(oct);
break;
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
+ case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
+ case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
+ octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+ octep_device_setup_cnxk_pf(oct);
+ break;
default:
dev_err(&pdev->dev,
"%s: unsupported device\n", __func__);
goto unsupported_dev;
}
- oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
ret = octep_ctrl_net_init(oct);
if (ret)
@@ -1237,6 +1342,7 @@ static void octep_device_cleanup(struct octep_device *oct)
oct->mbox[i] = NULL;
}
+ octep_delete_pfvf_mbox(oct);
octep_ctrl_net_uninit(oct);
cancel_delayed_work_sync(&oct->hb_task);
@@ -1284,6 +1390,7 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct octep_device *octep_dev = NULL;
struct net_device *netdev;
+ int max_rx_pktlen;
int err;
err = pci_enable_device(pdev);
@@ -1333,6 +1440,12 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_octep_config;
}
+ err = octep_setup_pfvf_mbox(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "PF-VF mailbox setup failed\n");
+ goto register_dev_err;
+ }
+
err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
&octep_dev->conf->fw_info);
if (err) {
@@ -1350,11 +1463,29 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_carrier_off(netdev);
netdev->hw_features = NETIF_F_SG;
- netdev->features |= netdev->hw_features;
+ if (OCTEP_TX_IP_CSUM(octep_dev->conf->fw_info.tx_ol_flags))
+ netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ if (OCTEP_RX_IP_CSUM(octep_dev->conf->fw_info.rx_ol_flags))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ max_rx_pktlen = octep_ctrl_net_get_mtu(octep_dev, OCTEP_CTRL_NET_INVALID_VFID);
+ if (max_rx_pktlen < 0) {
+ dev_err(&octep_dev->pdev->dev,
+ "Failed to get max receive packet size; err = %d\n", max_rx_pktlen);
+ err = max_rx_pktlen;
+ goto register_dev_err;
+ }
netdev->min_mtu = OCTEP_MIN_MTU;
- netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->max_mtu = max_rx_pktlen - (ETH_HLEN + ETH_FCS_LEN);
netdev->mtu = OCTEP_DEFAULT_MTU;
+ if (OCTEP_TX_TSO(octep_dev->conf->fw_info.tx_ol_flags)) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netif_set_tso_max_size(netdev, netdev->max_mtu);
+ }
+
+ netdev->features |= netdev->hw_features;
err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
octep_dev->mac_addr);
if (err) {
@@ -1383,6 +1514,21 @@ err_dma_mask:
return err;
}
+static int octep_sriov_disable(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+
+ if (pci_vfs_assigned(oct->pdev)) {
+ dev_warn(&pdev->dev, "Can't disable SRIOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ pci_disable_sriov(pdev);
+ CFG_GET_ACTIVE_VFS(oct->conf) = 0;
+
+ return 0;
+}
+
/**
* octep_remove() - Remove Octeon PCI device from driver control.
*
@@ -1400,6 +1546,7 @@ static void octep_remove(struct pci_dev *pdev)
return;
netdev = oct->netdev;
+ octep_sriov_disable(oct);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
@@ -1410,11 +1557,47 @@ static void octep_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static int octep_sriov_enable(struct octep_device *oct, int num_vfs)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int err;
+
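+	/* Record the VF count first; PF-VF mailbox polling relies on
+	 * CFG_GET_ACTIVE_VFS() to know how many VFs to service.
+	 */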
+ CFG_GET_ACTIVE_VFS(oct->conf) = num_vfs;
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev, "Failed to enable SRIOV err=%d\n", err);
+ CFG_GET_ACTIVE_VFS(oct->conf) = 0;
+ return err;
+ }
+
+ return num_vfs;
+}
+
+static int octep_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ int max_nvfs;
+
+ if (num_vfs == 0)
+ return octep_sriov_disable(oct);
+
+ max_nvfs = CFG_GET_MAX_VFS(oct->conf);
+
+ if (num_vfs > max_nvfs) {
+ dev_err(&pdev->dev, "Invalid VF count Max supported VFs = %d\n",
+ max_nvfs);
+ return -EINVAL;
+ }
+
+ return octep_sriov_enable(oct, num_vfs);
+}
+
static struct pci_driver octep_driver = {
.name = OCTEP_DRV_NAME,
.id_table = octep_pci_id_tbl,
.probe = octep_probe,
.remove = octep_remove,
+ .sriov_configure = octep_sriov_configure,
};
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index 6df902ebb7f3..fee59e0e0138 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -18,11 +18,17 @@
#define OCTEP_PCIID_CN93_PF 0xB200177d
#define OCTEP_PCIID_CN93_VF 0xB203177d
+#define OCTEP_PCI_DEVICE_ID_CN98_PF 0xB100
#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
#define OCTEP_PCI_DEVICE_ID_CNF95N_PF 0xB400 //95N PF
+#define OCTEP_PCI_DEVICE_ID_CN10KA_PF 0xB900 //CN10KA PF
+#define OCTEP_PCI_DEVICE_ID_CNF10KA_PF 0xBA00 //CNF10KA PF
+#define OCTEP_PCI_DEVICE_ID_CNF10KB_PF 0xBC00 //CNF10KB PF
+#define OCTEP_PCI_DEVICE_ID_CN10KB_PF 0xBD00 //CN10KB PF
+
#define OCTEP_MAX_QUEUES 63
#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
@@ -40,6 +46,15 @@
#define OCTEP_OQ_INTR_RESEND_BIT 59
#define OCTEP_MMIO_REGIONS 3
+
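+/* Pending instructions and free slots in a Tx ring, derived from the ring
+ * indices; ring sizes are powers of 2 (see OCTEP_IQ_MAX_DESCRIPTORS), so
+ * masking with ring_size_mask wraps the index arithmetic correctly.
+ */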
+#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
+ ((iq__)->host_write_index - (iq__)->flush_index) & \
+ (iq__)->ring_size_mask; \
+ })
+#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
+ (iq_)->max_count - IQ_INSTR_PENDING(iq_); \
+ })
+
/* PCI address space mapping information.
* Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
* Octeon gets mapped to different physical address spaces in
@@ -65,6 +80,7 @@ struct octep_hw_ops {
void (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+ irqreturn_t (*mbox_intr_handler)(void *ioq_vector);
irqreturn_t (*oei_intr_handler)(void *ioq_vector);
irqreturn_t (*ire_intr_handler)(void *ioq_vector);
irqreturn_t (*ore_intr_handler)(void *ioq_vector);
@@ -103,28 +119,27 @@ struct octep_mbox_data {
u64 *data;
};
+#define MAX_VF_PF_MBOX_DATA_SIZE 384
+/* wrappers around work structs */
+struct octep_pfvf_mbox_wk {
+ struct work_struct work;
+ void *ctxptr;
+ u64 ctxul;
+};
+
/* Octeon device mailbox */
struct octep_mbox {
- /* A spinlock to protect access to this q_mbox. */
- spinlock_t lock;
-
- u32 q_no;
- u32 state;
-
- /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
- u8 __iomem *mbox_int_reg;
-
- /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
- * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
- */
- u8 __iomem *mbox_write_reg;
-
- /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
- * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
- */
- u8 __iomem *mbox_read_reg;
-
+ /* A mutex to protect access to this q_mbox. */
+ struct mutex lock;
+ u32 vf_id;
+ u32 config_data_index;
+ u32 message_len;
+ u8 __iomem *pf_vf_data_reg;
+ u8 __iomem *vf_pf_data_reg;
+ struct octep_pfvf_mbox_wk wk;
+ struct octep_device *oct;
struct octep_mbox_data mbox_data;
+ u8 config_data[MAX_VF_PF_MBOX_DATA_SIZE];
};
/* Tx/Rx queue vector per interrupt. */
@@ -202,6 +217,12 @@ struct octep_iface_link_info {
u8 oper_up;
};
+/* The Octeon VF device specific info data structure.*/
+struct octep_pfvf_info {
+ u8 mac_addr[ETH_ALEN];
+ u32 mbox_version;
+};
+
/* The Octeon device specific private data structure.
* Each Octeon device has this structure to represent all its components.
*/
@@ -232,8 +253,7 @@ struct octep_device {
/* Tx queues (IQ: Instruction Queue) */
u16 num_iqs;
- /* pkind value to be used in every Tx hardware descriptor */
- u8 pkind;
+
/* Pointers to Octeon Tx queues */
struct octep_iq *iq[OCTEP_MAX_IQ];
@@ -268,6 +288,8 @@ struct octep_device {
/* Mailbox to talk to VFs */
struct octep_mbox *mbox[OCTEP_MAX_VF];
+ /* VFs info */
+ struct octep_pfvf_info vf_info[OCTEP_MAX_VF];
/* Work entry to handle Tx timeout */
struct work_struct tx_timeout_task;
@@ -377,6 +399,7 @@ int octep_setup_oqs(struct octep_device *oct);
void octep_free_oqs(struct octep_device *oct);
void octep_oq_dbell_init(struct octep_device *oct);
void octep_device_setup_cn93_pf(struct octep_device *oct);
+void octep_device_setup_cnxk_pf(struct octep_device *oct);
int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
int octep_oq_process_rx(struct octep_oq *oq, int budget);
void octep_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
new file mode 100644
index 000000000000..2e2c3be8a0b4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_pfvf_mbox.h"
+#include "octep_ctrl_net.h"
+
+/* When a new command is implemented, the below table should be updated
+ * with the new command and its version info.
+ */
+static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
+ [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
+ [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+static void octep_pfvf_validate_version(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ u32 vf_version = (u32)cmd.s_version.version;
+
+ dev_dbg(&oct->pdev->dev, "VF id:%d VF version:%d PF version:%d\n",
+ vf_id, vf_version, OCTEP_PFVF_MBOX_VERSION_CURRENT);
+ if (vf_version < OCTEP_PFVF_MBOX_VERSION_CURRENT)
+ rsp->s_version.version = vf_version;
+ else
+ rsp->s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+
+ oct->vf_info[vf_id].mbox_version = rsp->s_version.version;
+ dev_dbg(&oct->pdev->dev, "VF id:%d negotiated VF version:%d\n",
+ vf_id, oct->vf_info[vf_id].mbox_version);
+
+ rsp->s_version.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_link_status(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int status;
+
+ status = octep_ctrl_net_get_link_status(oct, vf_id);
+ if (status < 0) {
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF link status failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ rsp->s_link_status.status = status;
+}
+
+static void octep_pfvf_set_link_status(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_link_status(oct, vf_id, cmd.s_link_status.status, true);
+ if (err) {
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF link status failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_set_rx_state(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_rx_state(oct, vf_id, cmd.s_link_state.state, true);
+ if (err) {
+ rsp->s_link_state.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF Rx link state failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_state.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static int octep_send_notification(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd)
+{
+ u32 max_rings_per_vf, vf_mbox_queue;
+ struct octep_mbox *mbox;
+
+ /* Check whether the VF mailbox version supports this notification */
+ if (pfvf_cmd_versions[cmd.s.opcode] > oct->vf_info[vf_id].mbox_version) {
+ dev_dbg(&oct->pdev->dev, "VF Mbox doesn't support Notification:%d on VF ver:%d\n",
+ cmd.s.opcode, oct->vf_info[vf_id].mbox_version);
+ return -EOPNOTSUPP;
+ }
+
+ max_rings_per_vf = CFG_GET_MAX_RPVF(oct->conf);
+ vf_mbox_queue = vf_id * max_rings_per_vf;
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "Notif obtained for bad mbox vf %d\n", vf_id);
+ return -EINVAL;
+ }
+ mbox = oct->mbox[vf_mbox_queue];
+
+ mutex_lock(&mbox->lock);
+ writeq(cmd.u64, mbox->pf_vf_data_reg);
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+}
+
+static void octep_pfvf_set_mtu(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_mtu(oct, vf_id, cmd.s_set_mtu.mtu, true);
+ if (err) {
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF MTU failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_mtu(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int max_rx_pktlen = oct->netdev->max_mtu + (ETH_HLEN + ETH_FCS_LEN);
+
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ rsp->s_get_mtu.mtu = max_rx_pktlen;
+}
+
+static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
+ if (err) {
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
+ if (err) {
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_dev_remove(oct, vf_id);
+ if (err) {
+ rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Failed to acknowledge fw of vf %d removal\n",
+ vf_id);
+ return;
+ }
+ rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_fw_info(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_fw_info fw_info;
+ int err;
+
+ err = octep_ctrl_net_get_info(oct, vf_id, &fw_info);
+ if (err) {
+ rsp->s_fw_info.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF info failed via host control Mbox\n");
+ return;
+ }
+
+ rsp->s_fw_info.pkind = fw_info.pkind;
+ rsp->s_fw_info.fsz = fw_info.fsz;
+ rsp->s_fw_info.rx_ol_flags = fw_info.rx_ol_flags;
+ rsp->s_fw_info.tx_ol_flags = fw_info.tx_ol_flags;
+
+ rsp->s_fw_info.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_set_offloads(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_ctrl_net_offloads offloads = {
+ .rx_offloads = cmd.s_offloads.rx_ol_flags,
+ .tx_offloads = cmd.s_offloads.tx_ol_flags
+ };
+ int err;
+
+ err = octep_ctrl_net_set_offloads(oct, vf_id, &offloads, true);
+ if (err) {
+ rsp->s_offloads.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF offloads failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_offloads.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+int octep_setup_pfvf_mbox(struct octep_device *oct)
+{
+ int i = 0, num_vfs = 0, rings_per_vf = 0;
+ int ring = 0;
+
+ num_vfs = oct->conf->sriov_cfg.active_vfs;
+ rings_per_vf = oct->conf->sriov_cfg.max_rings_per_vf;
+
+ for (i = 0; i < num_vfs; i++) {
+ ring = rings_per_vf * i;
+ oct->mbox[ring] = vzalloc(sizeof(*oct->mbox[ring]));
+
+ if (!oct->mbox[ring])
+ goto free_mbox;
+
+ memset(oct->mbox[ring], 0, sizeof(struct octep_mbox));
+ memset(&oct->vf_info[i], 0, sizeof(struct octep_pfvf_info));
+ mutex_init(&oct->mbox[ring]->lock);
+ INIT_WORK(&oct->mbox[ring]->wk.work, octep_pfvf_mbox_work);
+ oct->mbox[ring]->wk.ctxptr = oct->mbox[ring];
+ oct->mbox[ring]->oct = oct;
+ oct->mbox[ring]->vf_id = i;
+ oct->hw_ops.setup_mbox_regs(oct, ring);
+ }
+ return 0;
+
+free_mbox:
+ while (i) {
+ i--;
+ ring = rings_per_vf * i;
+ cancel_work_sync(&oct->mbox[ring]->wk.work);
+ mutex_destroy(&oct->mbox[ring]->lock);
+ vfree(oct->mbox[ring]);
+ oct->mbox[ring] = NULL;
+ }
+ return -ENOMEM;
+}
+
+void octep_delete_pfvf_mbox(struct octep_device *oct)
+{
+ int rings_per_vf = oct->conf->sriov_cfg.max_rings_per_vf;
+ int num_vfs = oct->conf->sriov_cfg.active_vfs;
+ int i = 0, ring = 0, vf_srn = 0;
+
+ for (i = 0; i < num_vfs; i++) {
+ ring = vf_srn + rings_per_vf * i;
+ if (!oct->mbox[ring])
+ continue;
+
+ if (work_pending(&oct->mbox[ring]->wk.work))
+ cancel_work_sync(&oct->mbox[ring]->wk.work);
+
+ mutex_destroy(&oct->mbox[ring]->lock);
+ vfree(oct->mbox[ring]);
+ oct->mbox[ring] = NULL;
+ }
+}
+
+static void octep_pfvf_pf_get_data(struct octep_device *oct,
+ struct octep_mbox *mbox, int vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int length = 0;
+ int i = 0;
+ int err;
+ struct octep_iface_link_info link_info;
+ struct octep_iface_rx_stats rx_stats;
+ struct octep_iface_tx_stats tx_stats;
+
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+
+ if (cmd.s_data.frag != OCTEP_PFVF_MBOX_MORE_FRAG_FLAG) {
+ mbox->config_data_index = 0;
+ memset(mbox->config_data, 0, MAX_VF_PF_MBOX_DATA_SIZE);
+ /* Based on the opcode, the corresponding PF
+ * driver API is called to fetch
+ * the requested data
+ */
+ switch (cmd.s.opcode) {
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO:
+ memset(&link_info, 0, sizeof(link_info));
+ err = octep_ctrl_net_get_link_info(oct, vf_id, &link_info);
+ if (!err) {
+ mbox->message_len = sizeof(link_info);
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ memcpy(mbox->config_data, (u8 *)&link_info, sizeof(link_info));
+ } else {
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_STATS:
+ memset(&rx_stats, 0, sizeof(rx_stats));
+ memset(&tx_stats, 0, sizeof(tx_stats));
+ err = octep_ctrl_net_get_if_stats(oct, vf_id, &rx_stats, &tx_stats);
+ if (!err) {
+ mbox->message_len = sizeof(rx_stats) + sizeof(tx_stats);
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ memcpy(mbox->config_data, (u8 *)&rx_stats, sizeof(rx_stats));
+ memcpy(mbox->config_data + sizeof(rx_stats), (u8 *)&tx_stats,
+ sizeof(tx_stats));
+
+ } else {
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+ break;
+ }
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ return;
+ }
+
+ if (mbox->message_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE)
+ length = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ else
+ length = mbox->message_len;
+
+ mbox->message_len -= length;
+
+ for (i = 0; i < length; i++) {
+ rsp->s_data.data[i] =
+ mbox->config_data[mbox->config_data_index];
+ mbox->config_data_index++;
+ }
+}
+
+void octep_pfvf_notify(struct octep_device *oct, struct octep_ctrl_mbox_msg *msg)
+{
+ union octep_pfvf_mbox_word notif = { 0 };
+ struct octep_ctrl_net_f2h_req *req;
+
+ req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;
+ switch (req->hdr.s.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ notif.s_link_status.opcode = OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS;
+ notif.s_link_status.status = req->link.state;
+ break;
+ default:
+ pr_info("Unknown mbox notif for vf: %u\n",
+ req->hdr.s.cmd);
+ return;
+ }
+
+ notif.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
+ octep_send_notification(oct, msg->hdr.s.vf_idx, notif);
+}
+
+void octep_pfvf_mbox_work(struct work_struct *work)
+{
+ struct octep_pfvf_mbox_wk *wk = container_of(work, struct octep_pfvf_mbox_wk, work);
+ union octep_pfvf_mbox_word cmd = { 0 };
+ union octep_pfvf_mbox_word rsp = { 0 };
+ struct octep_mbox *mbox = NULL;
+ struct octep_device *oct = NULL;
+ int vf_id;
+
+ mbox = (struct octep_mbox *)wk->ctxptr;
+ oct = (struct octep_device *)mbox->oct;
+ vf_id = mbox->vf_id;
+
+ mutex_lock(&mbox->lock);
+ cmd.u64 = readq(mbox->vf_pf_data_reg);
+ rsp.u64 = 0;
+
+ switch (cmd.s.opcode) {
+ case OCTEP_PFVF_MBOX_CMD_VERSION:
+ octep_pfvf_validate_version(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS:
+ octep_pfvf_get_link_status(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS:
+ octep_pfvf_set_link_status(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_RX_STATE:
+ octep_pfvf_set_rx_state(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_MTU:
+ octep_pfvf_set_mtu(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR:
+ octep_pfvf_set_mac_addr(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR:
+ octep_pfvf_get_mac_addr(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO:
+ case OCTEP_PFVF_MBOX_CMD_GET_STATS:
+ octep_pfvf_pf_get_data(oct, mbox, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_MTU:
+ octep_pfvf_get_mtu(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_DEV_REMOVE:
+ octep_pfvf_dev_remove(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_FW_INFO:
+ octep_pfvf_get_fw_info(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS:
+ octep_pfvf_set_offloads(oct, vf_id, cmd, &rsp);
+ break;
+ default:
+ dev_err(&oct->pdev->dev, "PF-VF mailbox: invalid opcode %d\n", cmd.s.opcode);
+ rsp.s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ break;
+ }
+ writeq(rsp.u64, mbox->vf_pf_data_reg);
+ mutex_unlock(&mbox->lock);
+}
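
The handlers above all follow one convention: the VF posts a single 64-bit command word, octep_pfvf_mbox_work() decodes the opcode and writes a response word back into the same data register with its type field set to RSP_ACK or RSP_NACK. A minimal VF-side sketch of that exchange, built only from the word layout in octep_pfvf_mbox.h, is shown below; the register pointer and the single blocking read are simplifications for illustration, not code from this patch.

/* Illustrative sketch only: a VF-side SET_MTU request using the word
 * layout declared in octep_pfvf_mbox.h. The register argument and the
 * lack of a poll/timeout loop are assumptions made for brevity.
 */
static int example_vf_set_mtu(void __iomem *vf_pf_data_reg, u16 mtu)
{
	union octep_pfvf_mbox_word cmd = { 0 };
	union octep_pfvf_mbox_word rsp;

	cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU;
	cmd.s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_CMD;
	cmd.s_set_mtu.mtu = mtu;

	writeq(cmd.u64, vf_pf_data_reg);  /* picked up by octep_pfvf_mbox_work() */
	rsp.u64 = readq(vf_pf_data_reg);  /* PF writes its response to the same register */

	return rsp.s_set_mtu.type == OCTEP_PFVF_MBOX_TYPE_RSP_ACK ? 0 : -EIO;
}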
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
new file mode 100644
index 000000000000..0dc6eead292a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_PFVF_MBOX_H_
+#define _OCTEP_PFVF_MBOX_H_
+
+/* VF flags */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
+#define OCTEON_SDP_16K_HW_FRS 16380UL
+#define OCTEON_SDP_64K_HW_FRS 65531UL
+
+/* When a new command is implemented, the PF Mbox version should be bumped.
+ */
+enum octep_pfvf_mbox_version {
+ OCTEP_PFVF_MBOX_VERSION_V0,
+ OCTEP_PFVF_MBOX_VERSION_V1,
+ OCTEP_PFVF_MBOX_VERSION_V2,
+};
+
+#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+
+enum octep_pfvf_mbox_opcode {
+ OCTEP_PFVF_MBOX_CMD_VERSION,
+ OCTEP_PFVF_MBOX_CMD_SET_MTU,
+ OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
+ OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_MTU,
+ OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
+ OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
+ OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
+ OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_MAX,
+};
+
+enum octep_pfvf_mbox_word_type {
+ OCTEP_PFVF_MBOX_TYPE_CMD,
+ OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
+ OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
+};
+
+enum octep_pfvf_mbox_cmd_status {
+ OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
+ OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4
+};
+
+enum octep_pfvf_mbox_state {
+ OCTEP_PFVF_MBOX_STATE_IDLE = 0,
+ OCTEP_PFVF_MBOX_STATE_BUSY = 1,
+};
+
+enum octep_pfvf_link_status {
+ OCTEP_PFVF_LINK_STATUS_DOWN,
+ OCTEP_PFVF_LINK_STATUS_UP,
+};
+
+enum octep_pfvf_link_speed {
+ OCTEP_PFVF_LINK_SPEED_NONE,
+ OCTEP_PFVF_LINK_SPEED_1000,
+ OCTEP_PFVF_LINK_SPEED_10000,
+ OCTEP_PFVF_LINK_SPEED_25000,
+ OCTEP_PFVF_LINK_SPEED_40000,
+ OCTEP_PFVF_LINK_SPEED_50000,
+ OCTEP_PFVF_LINK_SPEED_100000,
+ OCTEP_PFVF_LINK_SPEED_LAST,
+};
+
+enum octep_pfvf_link_duplex {
+ OCTEP_PFVF_LINK_HALF_DUPLEX,
+ OCTEP_PFVF_LINK_FULL_DUPLEX,
+};
+
+enum octep_pfvf_link_autoneg {
+ OCTEP_PFVF_LINK_AUTONEG,
+ OCTEP_PFVF_LINK_FIXED,
+};
+
+#define OCTEP_PFVF_MBOX_TIMEOUT_MS 500
+#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
+#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
+#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
+#define OCTEP_PFVF_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
+
+union octep_pfvf_mbox_word {
+ u64 u64;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 data:48;
+ } s;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 frag:1;
+ u64 rsvd:5;
+ u8 data[6];
+ } s_data;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 version:48;
+ } s_version;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u8 mac_addr[6];
+ } s_set_mac;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_set_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_get_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 state:1;
+ u64 rsvd:53;
+ } s_link_state;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 status:1;
+ u64 rsvd:53;
+ } s_link_status;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 pkind:8;
+ u64 fsz:8;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ u64 rsvd:6;
+ } s_fw_info;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:22;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ } s_offloads;
+} __packed;
+
+void octep_pfvf_mbox_work(struct work_struct *work);
+int octep_setup_pfvf_mbox(struct octep_device *oct);
+void octep_delete_pfvf_mbox(struct octep_device *oct);
+void octep_pfvf_notify(struct octep_device *oct, struct octep_ctrl_mbox_msg *msg);
+#endif
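
The bitfield views of octep_pfvf_mbox_word make decoding a response a matter of assigning the raw register value to .u64 and reading the named fields. A hedged sketch for the GET_FW_INFO response (the helper and its debug print are illustrative, not part of this patch):

/* Illustrative sketch: decoding an s_fw_info response word using the
 * layout above. The surrounding call context is assumed for the example.
 */
static void example_decode_fw_info(struct octep_device *oct, u64 raw)
{
	union octep_pfvf_mbox_word rsp;

	rsp.u64 = raw;
	if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK)
		return;

	dev_dbg(&oct->pdev->dev,
		"fw_info: pkind=%u fsz=%u rx_ol=0x%x tx_ol=0x%x\n",
		(u32)rsp.s_fw_info.pkind, (u32)rsp.s_fw_info.fsz,
		(u32)rsp.s_fw_info.rx_ol_flags, (u32)rsp.s_fw_info.tx_ol_flags);
}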
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index 0a43983e9101..ca473502d7a0 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -208,6 +208,9 @@
#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+#define CN93_SDP_MBOX_VF_PF_DATA_START 0x24000
+#define CN93_SDP_MBOX_PF_VF_DATA_START 0x22000
+
#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
(CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
@@ -217,6 +220,12 @@
#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
(CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+#define CN93_SDP_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_MBOX_VF_PF_DATA_START + ((ring) * CN93_EPVF_RING_OFFSET))
+
+#define CN93_SDP_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_MBOX_PF_VF_DATA_START + ((ring) * CN93_EPVF_RING_OFFSET))
+
/* ##################### Interrupt Registers ########################## */
#define CN93_SDP_R_ERR_TYPE_START 0x10400
@@ -362,6 +371,10 @@
#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+#define CN98_SDP_MAC_PF_RING_CTL_NPFS(val) (((val) >> 48) & 0xF)
+#define CN98_SDP_MAC_PF_RING_CTL_SRN(val) ((val) & 0xFF)
+#define CN98_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 32) & 0x3F)
+
/* Number of non-queue interrupts in CN93xx */
#define CN93_NUM_NON_IOQ_INTR 16
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
new file mode 100644
index 000000000000..e637d7c8224d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CNXK_PF_H_
+#define _OCTEP_REGS_CNXK_PF_H_
+
+/* ############################ RST ######################### */
+#define CNXK_RST_BOOT 0x000087E006001600ULL
+#define CNXK_RST_CHIP_DOMAIN_W1S 0x000087E006001810ULL
+#define CNXK_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CNXK_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CNXK_CONFIG_XPANSION_BAR 0x38
+#define CNXK_CONFIG_PCIE_CAP 0x70
+#define CNXK_CONFIG_PCIE_DEVCAP 0x74
+#define CNXK_CONFIG_PCIE_DEVCTL 0x78
+#define CNXK_CONFIG_PCIE_LINKCAP 0x7C
+#define CNXK_CONFIG_PCIE_LINKCTL 0x80
+#define CNXK_CONFIG_PCIE_SLOTCAP 0x84
+#define CNXK_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CNXK_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CNXK_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CNXK_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CNXK_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CNXK_RING_OFFSET (0x1ULL << 17)
+#define CNXK_EPF_OFFSET (0x1ULL << 25)
+#define CNXK_MAC_OFFSET (0x1ULL << 4)
+#define CNXK_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CNXK_EPVF_RING_OFFSET (0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CNXK_SDP_EPF_SCRATCH 0x209E0
+
+/* ################# Window Registers ######################### */
+#define CNXK_SDP_WIN_WR_ADDR64 0x20000
+#define CNXK_SDP_WIN_RD_ADDR64 0x20010
+#define CNXK_SDP_WIN_WR_DATA64 0x20020
+#define CNXK_SDP_WIN_WR_MASK_REG 0x20030
+#define CNXK_SDP_WIN_RD_DATA64 0x20040
+
+#define CNXK_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CNXK_SDP_EPF_RINFO 0x209F0
+
+#define CNXK_SDP_EPF_RINFO_SRN(val) ((val) & 0x7F)
+#define CNXK_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CNXK_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0x7F)
+
+/* SDP Function select */
+#define CNXK_SDP_FUNC_SEL_EPF_BIT_POS 7
+#define CNXK_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CNXK_SDP_R_IN_CONTROL_START 0x10000
+#define CNXK_SDP_R_IN_ENABLE_START 0x10010
+#define CNXK_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CNXK_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CNXK_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CNXK_SDP_R_IN_CNTS_START 0x10050
+#define CNXK_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CNXK_SDP_R_IN_PKT_CNT_START 0x10080
+#define CNXK_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CNXK_SDP_R_IN_CONTROL(ring) \
+ (CNXK_SDP_R_IN_CONTROL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_ENABLE(ring) \
+ (CNXK_SDP_R_IN_ENABLE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_BADDR(ring) \
+ (CNXK_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CNXK_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_DBELL(ring) \
+ (CNXK_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_CNTS(ring) \
+ (CNXK_SDP_R_IN_CNTS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_LEVELS(ring) \
+ (CNXK_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_PKT_CNT(ring) \
+ (CNXK_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_BYTE_CNT(ring) \
+ (CNXK_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+/* Rings per Virtual Function */
+#define CNXK_R_IN_CTL_RPVF_MASK (0xF)
+#define CNXK_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request.
+ * Set to the maximum value (4).
+ */
+#define CNXK_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CNXK_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CNXK_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CNXK_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CNXK_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CNXK_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CNXK_R_IN_CTL_NSR (0x1ULL << 3)
+#define CNXK_R_IN_CTL_ESR (0x1ULL << 1)
+#define CNXK_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CNXK_R_IN_CTL_MASK (CNXK_R_IN_CTL_RDSIZE | CNXK_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CNXK_SDP_R_OUT_CNTS_START 0x10100
+#define CNXK_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CNXK_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CNXK_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CNXK_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CNXK_SDP_R_OUT_CONTROL_START 0x10150
+#define CNXK_SDP_R_OUT_WMARK_START 0x10160
+#define CNXK_SDP_R_OUT_ENABLE_START 0x10170
+#define CNXK_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CNXK_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CNXK_SDP_R_OUT_CONTROL(ring) \
+ (CNXK_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_ENABLE(ring) \
+ (CNXK_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CNXK_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CNXK_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CNXK_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_WMARK(ring) \
+ (CNXK_SDP_R_OUT_WMARK_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_CNTS(ring) \
+ (CNXK_SDP_R_OUT_CNTS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_LEVELS(ring) \
+ (CNXK_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_PKT_CNT(ring) \
+ (CNXK_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_BYTE_CNT(ring) \
+ (CNXK_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CNXK_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CNXK_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CNXK_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CNXK_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CNXK_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CNXK_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CNXK_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CNXK_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CNXK_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CNXK_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CNXK_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CNXK_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CNXK_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CNXK_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CNXK_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CNXK_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CNXK_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CNXK_SDP_R_MBOX_ISM_START 0x10500
+#define CNXK_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CNXK_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CNXK_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_ISM(ring) \
+ (CNXK_SDP_R_MBOX_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_SDP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_CNTS_ISM(ring) \
+ (CNXK_SDP_R_IN_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When an MBOX write from the PF happens for a VF,
+ * the corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is an RO register; the interrupt is cleared by writing 1 to PF_VF_INT
+ */
+/* The first three are from PF to VF; the last one is data from VF to PF */
+#define CNXK_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CNXK_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CNXK_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CNXK_SDP_MBOX_VF_PF_DATA_START 0x24000
+#define CNXK_SDP_MBOX_PF_VF_DATA_START 0x22000
+
+#define CNXK_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CNXK_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_MBOX_VF_PF_DATA(ring) \
+ (CNXK_SDP_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+
+#define CNXK_SDP_MBOX_PF_VF_DATA(ring) \
+ (CNXK_SDP_MBOX_PF_VF_DATA_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CNXK_SDP_R_ERR_TYPE_START 0x10400
+
+#define CNXK_SDP_R_ERR_TYPE(ring) \
+ (CNXK_SDP_R_ERR_TYPE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_ISM_START 0x10500
+#define CNXK_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CNXK_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CNXK_SDP_R_MBOX_ISM(ring) \
+ (CNXK_SDP_R_MBOX_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_SDP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_CNTS_ISM(ring) \
+ (CNXK_SDP_R_IN_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_EPF_MBOX_RINT_START 0x20100
+#define CNXK_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CNXK_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CNXK_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CNXK_SDP_EPF_IRERR_RINT 0x20200
+#define CNXK_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CNXK_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CNXK_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CNXK_SDP_EPF_VFORE_RINT_START 0x20240
+#define CNXK_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CNXK_SDP_EPF_ORERR_RINT 0x20320
+#define CNXK_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CNXK_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CNXK_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CNXK_SDP_EPF_OEI_RINT 0x20400
+#define CNXK_SDP_EPF_OEI_RINT_W1S 0x20500
+#define CNXK_SDP_EPF_OEI_RINT_ENA_W1C 0x20600
+#define CNXK_SDP_EPF_OEI_RINT_ENA_W1S 0x20700
+
+#define CNXK_SDP_EPF_DMA_RINT 0x20800
+#define CNXK_SDP_EPF_DMA_RINT_W1S 0x20810
+#define CNXK_SDP_EPF_DMA_RINT_ENA_W1C 0x20820
+#define CNXK_SDP_EPF_DMA_RINT_ENA_W1S 0x20830
+
+#define CNXK_SDP_EPF_DMA_INT_LEVEL_START 0x20840
+#define CNXK_SDP_EPF_DMA_CNT_START 0x20860
+#define CNXK_SDP_EPF_DMA_TIM_START 0x20880
+
+#define CNXK_SDP_EPF_MISC_RINT 0x208A0
+#define CNXK_SDP_EPF_MISC_RINT_W1S 0x208B0
+#define CNXK_SDP_EPF_MISC_RINT_ENA_W1C 0x208C0
+#define CNXK_SDP_EPF_MISC_RINT_ENA_W1S 0x208D0
+
+#define CNXK_SDP_EPF_DMA_VF_RINT_START 0x208E0
+#define CNXK_SDP_EPF_DMA_VF_RINT_W1S_START 0x20900
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20920
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20940
+
+#define CNXK_SDP_EPF_PP_VF_RINT_START 0x20960
+#define CNXK_SDP_EPF_PP_VF_RINT_W1S_START 0x20980
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x209A0
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x209C0
+
+#define CNXK_SDP_EPF_MBOX_RINT(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_VFIRE_RINT(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_VFORE_RINT(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_DMA_VF_RINT(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_START + ((index) + CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) + CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) + CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) + CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_PP_VF_RINT(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_START + ((index) + CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_W1S_START + ((index) + CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) + CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) + CNXK_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CNXK_INTR_R_SEND_ISM BIT_ULL(63)
+#define CNXK_INTR_R_OUT_INT BIT_ULL(62)
+#define CNXK_INTR_R_IN_INT BIT_ULL(61)
+#define CNXK_INTR_R_MBOX_INT BIT_ULL(60)
+#define CNXK_INTR_R_RESEND BIT_ULL(59)
+#define CNXK_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CNXK_SDP_EPVF_RING_START 0x26000
+#define CNXK_SDP_IN_RING_TB_MAP_START 0x28000
+#define CNXK_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CNXK_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CNXK_SDP_EPVF_RING(ring) \
+ (CNXK_SDP_EPVF_RING_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_IN_RING_TB_MAP(ring) \
+ (CNXK_SDP_N_RING_TB_MAP_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_IN_RATE_LIMIT(ring) \
+ (CNXK_SDP_IN_RATE_LIMIT_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_MAC_PF_RING_CTL(mac) \
+ (CNXK_SDP_MAC_PF_RING_CTL_START + ((mac) * CNXK_MAC_OFFSET))
+
+#define CNXK_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0x3)
+#define CNXK_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0x7F)
+#define CNXK_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+
+/* Number of non-queue interrupts in CNXKxx */
+#define CNXK_NUM_NON_IOQ_INTR 32
+
+/* bit 0 for control mbox interrupt */
+#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0)
+/* bit 1 for firmware heartbeat interrupt */
+#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define FW_STATUS_RUNNING 2ULL
+#define CNXK_PEMX_PFX_CSX_PFCFGX(pem, pf, offset) ({ typeof(offset) _off = (offset); \
+ ((0x8e0000008000 | \
+ (uint64_t)(pem) << 36 \
+ | (pf) << 18 \
+ | ((_off >> 16) & 1) << 16 \
+ | (_off >> 3) << 3) \
+ + (((_off >> 2) & 1) << 2)); \
+ })
+
+/* Register defines for use with CNXK_PEMX_PFX_CSX_PFCFGX */
+#define CNXK_PCIEEP_VSECST_CTL 0x418
+
+#define CNXK_PEM_BAR4_INDEX 7
+#define CNXK_PEM_BAR4_INDEX_SIZE 0x400000ULL
+#define CNXK_PEM_BAR4_INDEX_OFFSET (CNXK_PEM_BAR4_INDEX * CNXK_PEM_BAR4_INDEX_SIZE)
+
+#endif /* _OCTEP_REGS_CNXK_PF_H_ */
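
CNXK_PEMX_PFX_CSX_PFCFGX() composes the indirect address used to reach a PF's PCIe config registers through the PEM block. As an illustrative worked expansion for pem = 0, pf = 0 and the CNXK_PCIEEP_VSECST_CTL offset (0x418) defined above:

/* Worked example (illustrative), expanding the macro by hand:
 *
 *   _off                     = 0x418
 *   base                     = 0x8e0000008000
 *   (uint64_t)pem << 36      = 0            (pem = 0)
 *   pf << 18                 = 0            (pf  = 0)
 *   ((_off >> 16) & 1) << 16 = 0
 *   (_off >> 3) << 3         = 0x418        (offset is already 8-byte aligned)
 *   ((_off >> 2) & 1) << 2   = 0
 *
 *   CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL) == 0x8e0000008418
 */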
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 3c43f8078528..4746a6b258f0 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -143,7 +143,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
* additional header is filled-in by Octeon after length field in
* Rx packets. this header contains additional packet information.
*/
- if (oct->caps_enabled)
+ if (oct->conf->fw_info.rx_ol_flags)
oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
@@ -353,11 +353,13 @@ static int __octep_oq_process_rx(struct octep_device *oct,
struct octep_oq *oq, u16 pkts_to_process)
{
struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ netdev_features_t feat = oq->netdev->features;
struct octep_rx_buffer *buff_info;
struct octep_oq_resp_hw *resp_hw;
u32 pkt, rx_bytes, desc_used;
struct sk_buff *skb;
u16 data_offset;
+ u16 rx_ol_flags;
u32 read_idx;
read_idx = oq->host_read_idx;
@@ -372,7 +374,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
/* Swap the length field that is in Big-Endian to CPU */
buff_info->len = be64_to_cpu(resp_hw->length);
- if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ if (oct->conf->fw_info.rx_ol_flags) {
/* Extended response header is immediately after
* response header (resp_hw)
*/
@@ -384,11 +386,13 @@ static int __octep_oq_process_rx(struct octep_device *oct,
*/
data_offset = OCTEP_OQ_RESP_HW_SIZE +
OCTEP_OQ_RESP_HW_EXT_SIZE;
+ rx_ol_flags = resp_hw_ext->rx_ol_flags;
} else {
/* Data is immediately after
* Hardware Rx response header.
*/
data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ rx_ol_flags = 0;
}
rx_bytes += buff_info->len;
@@ -444,8 +448,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
skb->dev = oq->netdev;
skb->protocol = eth_type_trans(skb, skb->dev);
- if (resp_hw_ext &&
- resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ if (feat & NETIF_F_RXCSUM &&
+ OCTEP_RX_CSUM_VERIFIED(rx_ol_flags))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 49feae80d7d2..3b08e2d560dc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -20,13 +20,33 @@ struct octep_oq_desc_hw {
dma_addr_t buffer_ptr;
u64 info_ptr;
};
+
static_assert(sizeof(struct octep_oq_desc_hw) == 16);
#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
-#define OCTEP_CSUM_L4_VERIFIED 0x1
-#define OCTEP_CSUM_IP_VERIFIED 0x2
-#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+/* Rx offload flags */
+#define OCTEP_RX_OFFLOAD_VLAN_STRIP BIT(0)
+#define OCTEP_RX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_RX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_RX_OFFLOAD_TCP_CKSUM BIT(3)
+
+#define OCTEP_RX_OFFLOAD_CKSUM (OCTEP_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_RX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_RX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_RX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_RX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_RX_OFFLOAD_UDP_CKSUM))
+
+/* bit 0 is vlan strip */
+#define OCTEP_RX_CSUM_IP_VERIFIED BIT(1)
+#define OCTEP_RX_CSUM_L4_VERIFIED BIT(2)
+
+#define OCTEP_RX_CSUM_VERIFIED(flags) ((flags) & \
+ (OCTEP_RX_CSUM_L4_VERIFIED | \
+ OCTEP_RX_CSUM_IP_VERIFIED))
/* Extended Response Header in packet data received from Hardware.
* Includes metadata like checksum status.
@@ -35,11 +55,12 @@ static_assert(sizeof(struct octep_oq_desc_hw) == 16);
*/
struct octep_oq_resp_hw_ext {
/* Reserved. */
- u64 reserved:62;
+ u64 rsvd:48;
- /* checksum verified. */
- u64 csum_verified:2;
+ /* offload flags */
+ u16 rx_ol_flags;
};
+
static_assert(sizeof(struct octep_oq_resp_hw_ext) == 8);
#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
@@ -52,6 +73,7 @@ struct octep_oq_resp_hw {
/* The Length of the packet. */
__be64 length;
};
+
static_assert(sizeof(struct octep_oq_resp_hw) == 8);
#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
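
The OCTEP_RX_OFFLOAD_* bits describe firmware-advertised Rx capabilities, while the OCTEP_RX_CSUM_*_VERIFIED bits are reported per packet in rx_ol_flags of the extended response header. A hedged sketch of mapping the capability mask to netdev features (the helper name and mapping policy are assumptions, not code from this patch):

/* Illustrative sketch: translating firmware Rx offload capabilities
 * into netdev feature bits. Helper name and policy are assumptions.
 */
static netdev_features_t example_rx_offloads_to_features(u16 rx_ol_flags)
{
	netdev_features_t feat = 0;

	if (rx_ol_flags & OCTEP_RX_OFFLOAD_CKSUM)
		feat |= NETIF_F_RXCSUM;

	return feat;
}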
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index d0adb82d65c3..06851b78aa28 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -21,7 +21,6 @@ static void octep_iq_reset_indices(struct octep_iq *iq)
iq->flush_index = 0;
iq->pkts_processed = 0;
iq->pkt_in_done = 0;
- atomic_set(&iq->instr_pending, 0);
}
/**
@@ -82,7 +81,6 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- atomic_sub(compl_pkts, &iq->instr_pending);
iq->stats.instr_completed += compl_pkts;
iq->stats.bytes_sent += compl_bytes;
iq->stats.sgentry_sent += compl_sg;
@@ -91,7 +89,7 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
- ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ (IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD))
netif_wake_subqueue(iq->netdev, iq->q_no);
return !budget;
@@ -144,7 +142,6 @@ static void octep_iq_free_pending(struct octep_iq *iq)
dev_kfree_skb_any(skb);
}
- atomic_set(&iq->instr_pending, 0);
iq->flush_index = fi;
netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 86c98b13fc44..875a2c34091f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -36,6 +36,7 @@ struct octep_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
};
+
static_assert(sizeof(struct octep_tx_sglist_desc) == 40);
/* Each Scatter/Gather entry sent to hardwar hold four pointers.
@@ -60,6 +61,18 @@ struct octep_tx_buffer {
/* Hardware interface Tx statistics */
struct octep_iface_tx_stats {
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
/* Packets dropped due to excessive collisions */
u64 xscol;
@@ -76,12 +89,6 @@ struct octep_iface_tx_stats {
*/
u64 scol;
- /* Total octets sent on the interface */
- u64 octs;
-
- /* Total frames sent on the interface */
- u64 pkts;
-
/* Packets sent with an octet count < 64 */
u64 hist_lt64;
@@ -106,12 +113,6 @@ struct octep_iface_tx_stats {
/* Packets sent with an octet count of > 1518 */
u64 hist_gt1518;
- /* Packets sent to a broadcast DMAC */
- u64 bcst;
-
- /* Packets sent to the multicast DMAC */
- u64 mcst;
-
/* Packets sent that experienced a transmit underflow and were
* truncated
*/
@@ -172,9 +173,6 @@ struct octep_iq {
/* Statistics for this input queue. */
struct octep_iq_stats stats;
- /* This field keeps track of the instructions pending in this queue. */
- atomic_t instr_pending;
-
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_tx_desc_hw *desc_ring;
@@ -240,32 +238,53 @@ struct octep_instr_hdr {
/* Reserved3 */
u64 reserved3:1;
};
+
static_assert(sizeof(struct octep_instr_hdr) == 8);
-/* Hardware Tx completion response header */
-struct octep_instr_resp_hdr {
- /* Request ID */
- u64 rid:16;
+/* Tx offload flags */
+#define OCTEP_TX_OFFLOAD_VLAN_INSERT BIT(0)
+#define OCTEP_TX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_TX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_TX_OFFLOAD_TCP_CKSUM BIT(3)
+#define OCTEP_TX_OFFLOAD_SCTP_CKSUM BIT(4)
+#define OCTEP_TX_OFFLOAD_TCP_TSO BIT(5)
+#define OCTEP_TX_OFFLOAD_UDP_TSO BIT(6)
+
+#define OCTEP_TX_OFFLOAD_CKSUM (OCTEP_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_TX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_TX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_TX_OFFLOAD_TSO (OCTEP_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_TX_OFFLOAD_UDP_TSO)
+
+#define OCTEP_TX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_TX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_TX_OFFLOAD_UDP_CKSUM))
- /* PCIe port to use for response */
- u64 pcie_port:3;
+#define OCTEP_TX_TSO(flags) ((flags) & \
+ (OCTEP_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_TX_OFFLOAD_UDP_TSO))
- /* Scatter indicator 1=scatter */
- u64 scatter:1;
+struct tx_mdata {
- /* Size of Expected result OR no. of entries in scatter list */
- u64 rlenssz:14;
+ /* offload flags */
+ u16 ol_flags;
- /* Desired destination port for result */
- u64 dport:6;
+ /* gso size */
+ u16 gso_size;
- /* Opcode Specific parameters */
- u64 param:8;
+ /* gso flags */
+ u16 gso_segs;
- /* Opcode for the return packet */
- u64 opcode:16;
+ /* reserved */
+ u16 rsvd1;
+
+ /* reserved */
+ u64 rsvd2;
};
-static_assert(sizeof(struct octep_instr_hdr) == 8);
+
+static_assert(sizeof(struct tx_mdata) == 16);
/* 64-byte Tx instruction format.
* Format of instruction for a 64-byte mode input queue.
@@ -284,18 +303,14 @@ struct octep_tx_desc_hw {
struct octep_instr_hdr ih;
u64 ih64;
};
-
- /* Pointer where the response for a RAW mode packet will be written
- * by Octeon.
- */
- u64 rptr;
-
- /* Input Instruction Response Header. */
- struct octep_instr_resp_hdr irh;
-
+ union {
+ u64 txm64[2];
+ struct tx_mdata txm;
+ };
/* Additional headers available in a 64-byte instruction. */
- u64 exhdr[4];
+ u64 exthdr[4];
};
+
static_assert(sizeof(struct octep_tx_desc_hw) == 64);
#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
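
The tx_mdata block replaces the old response header in the 64-byte instruction and carries per-packet offload hints. A hedged sketch of filling it from an skb (the surrounding xmit path is assumed; only the field usage mirrors this patch):

/* Illustrative sketch: populating the tx_mdata block of a Tx descriptor
 * for checksum-offloaded and TSO packets. The xmit context is assumed.
 */
static void example_fill_tx_mdata(struct octep_tx_desc_hw *hw_desc,
				  struct sk_buff *skb)
{
	hw_desc->txm64[0] = 0;
	hw_desc->txm64[1] = 0;

	if (skb_is_gso(skb)) {
		hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM |
					OCTEP_TX_OFFLOAD_TSO;
		hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
		hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
	}
}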
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 9690ac01f02c..b92264d0a77e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -413,4 +413,5 @@ const char *otx2_mbox_id2name(u16 id)
EXPORT_SYMBOL(otx2_mbox_id2name);
MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 5df42634ceb8..edeb0f737312 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -304,6 +304,13 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
+M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
+ nix_mcast_grp_create_rsp) \
+M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_req, \
+ msg_rsp) \
+M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
+ nix_mcast_grp_update_req, \
+ nix_mcast_grp_update_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -830,6 +837,9 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_INVALID_MCAST_GRP = -436,
+ NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
+ NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
};
/* For NIX RX vtag action */
@@ -1204,6 +1214,68 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+struct nix_mcast_grp_create_req {
+ struct mbox_msghdr hdr;
+#define NIX_MCAST_INGRESS 0
+#define NIX_MCAST_EGRESS 1
+ u8 dir;
+ u8 reserved[11];
+ /* Reserving a few bytes for future requirements */
+};
+
+struct nix_mcast_grp_create_rsp {
+ struct mbox_msghdr hdr;
+ /* This mcast_grp_idx should be passed during the MCAM
+ * entry write for multicast. AF identifies the
+ * multicast table index associated with the group id
+ * and programs it into the MCAM entry.
+ * The group id is also needed for group delete
+ * and update requests.
+ */
+ u32 mcast_grp_idx;
+};
+
+struct nix_mcast_grp_destroy_req {
+ struct mbox_msghdr hdr;
+ /* Group id returned by nix_mcast_grp_create_rsp */
+ u32 mcast_grp_idx;
+ /* If AF is requesting the destroy, set it
+ * to '1'; otherwise keep it '0'
+ */
+ u8 is_af;
+};
+
+struct nix_mcast_grp_update_req {
+ struct mbox_msghdr hdr;
+ /* Group id returned by nix_mcast_grp_create_rsp */
+ u32 mcast_grp_idx;
+ /* Number of multicast/mirror entries requested */
+ u32 num_mce_entry;
+#define NIX_MCE_ENTRY_MAX 64
+#define NIX_RX_RQ 0
+#define NIX_RX_RSS 1
+ /* Receive queue or RSS index within pf_func */
+ u32 rq_rss_index[NIX_MCE_ENTRY_MAX];
+ /* pcifunc is required for both ingress and egress multicast */
+ u16 pcifunc[NIX_MCE_ENTRY_MAX];
+ /* channel is required for egress multicast */
+ u16 channel[NIX_MCE_ENTRY_MAX];
+#define NIX_MCAST_OP_ADD_ENTRY 0
+#define NIX_MCAST_OP_DEL_ENTRY 1
+ /* Destination type. 0:Receive queue, 1:RSS */
+ u8 dest_type[NIX_MCE_ENTRY_MAX];
+ u8 op;
+ /* If AF is requesting the update, set it
+ * to '1'; otherwise keep it '0'
+ */
+ u8 is_af;
+};
+
+struct nix_mcast_grp_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 mce_start_index;
+};
+
/* Global NIX inline IPSec configuration */
struct nix_inline_ipsec_cfg {
struct mbox_msghdr hdr;
@@ -1479,6 +1551,8 @@ struct flow_msg {
#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0)
#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8)
u32 mpls_lse[4];
+ u8 icmp_type;
+ u8 icmp_code;
};
struct npc_install_flow_req {
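
The three new mailbox messages follow the usual create/update/destroy pattern: create returns mcast_grp_idx, update adds or deletes MCE entries against that index, and destroy frees the group. A hedged sketch of a NIX LF driver using them is below; the otx2_mbox_alloc_msg_* helpers are assumed to be generated from the M() table as for the existing messages, and locking plus response error checks are trimmed.

/* Illustrative sketch only: create an ingress multicast group and add
 * one receive queue to it. Helper names are assumed to be generated
 * from the M() macro table above; mbox locking and response error
 * checks are omitted for brevity.
 */
static int example_mcast_grp_add_rq(struct otx2_nic *pfvf, u32 rq_idx,
				    u32 *grp_idx)
{
	struct nix_mcast_grp_create_req *creq;
	struct nix_mcast_grp_create_rsp *crsp;
	struct nix_mcast_grp_update_req *ureq;
	int err;

	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&pfvf->mbox);
	if (!creq)
		return -ENOMEM;
	creq->dir = NIX_MCAST_INGRESS;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		return err;
	crsp = (struct nix_mcast_grp_create_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &creq->hdr);
	*grp_idx = crsp->mcast_grp_idx;

	ureq = otx2_mbox_alloc_msg_nix_mcast_grp_update(&pfvf->mbox);
	if (!ureq)
		return -ENOMEM;
	ureq->mcast_grp_idx = *grp_idx;
	ureq->num_mce_entry = 1;
	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
	ureq->pcifunc[0] = pfvf->pcifunc;
	ureq->rq_rss_index[0] = rq_idx;
	ureq->dest_type[0] = NIX_RX_RQ;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}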
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 8c0732c9a7ee..b0b4dea548e1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -214,6 +214,8 @@ enum key_fields {
NPC_MPLS3_TTL,
NPC_MPLS4_LBTCBOS,
NPC_MPLS4_TTL,
+ NPC_TYPE_ICMP,
+ NPC_CODE_ICMP,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 4728ba34b0e3..76218f1cb459 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -506,6 +506,7 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
rpm_t *rpm = rpmd;
u8 num_lmacs;
u32 fifo_len;
+ u16 max_lmac;
lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
/* LMACs are divided into two groups and each group
@@ -513,7 +514,11 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
* Group0 lmac_id range {0..3}
* Group1 lmac_id range {4..7}
*/
- fifo_len = rpm->mac_ops->fifo_len / 2;
+ max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
+ if (max_lmac > 4)
+ fifo_len = rpm->mac_ops->fifo_len / 2;
+ else
+ fifo_len = rpm->mac_ops->fifo_len;
if (lmac_id < 4) {
num_lmacs = hweight8(lmac_info & 0xF);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 731bb82b577c..5c1d04a3c559 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -156,7 +156,7 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
return start;
}
-static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
if (!rsrc->bmap)
return;
@@ -935,6 +935,9 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
hw->total_vfs = (cfg >> 20) & 0xFFF;
hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+ if (!is_rvu_otx2(rvu))
+ rvu_apr_block_cn10k_init(rvu);
+
/* Init NPA LF's bitmap */
block = &hw->block[BLKADDR_NPA];
if (!block->implemented)
@@ -2614,6 +2617,10 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 2. Flush and reset SSO/SSOW
* 3. Cleanup pools (NPA)
*/
+
+ /* Free multicast/mirror node associated with the 'pcifunc' */
+ rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
+
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 8802961b8889..43be37dd1f32 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -116,11 +116,12 @@ struct rvu_block {
};
struct nix_mcast {
- struct qmem *mce_ctx;
- struct qmem *mcast_buf;
- int replay_pkind;
- int next_free_mce;
- struct mutex mce_lock; /* Serialize MCE updates */
+ struct qmem *mce_ctx;
+ struct qmem *mcast_buf;
+ int replay_pkind;
+ struct rsrc_bmap mce_counter[2];
+ /* Counters for both ingress and egress mcast lists */
+ struct mutex mce_lock; /* Serialize MCE updates */
};
struct nix_mce_list {
@@ -129,6 +130,23 @@ struct nix_mce_list {
int max;
};
+struct nix_mcast_grp_elem {
+ struct nix_mce_list mcast_mce_list;
+ u32 mcast_grp_idx;
+ u32 pcifunc;
+ int mcam_index;
+ int mce_start_index;
+ struct list_head list;
+ u8 dir;
+};
+
+struct nix_mcast_grp {
+ struct list_head mcast_grp_head;
+ int count;
+ int next_grp_index;
+ struct mutex mcast_grp_lock; /* Serialize MCE updates */
+};
+
/* layer metadata to uniquely identify a packet header field */
struct npc_layer_mdata {
u8 lid;
@@ -339,6 +357,7 @@ struct nix_hw {
struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
+ struct nix_mcast_grp mcast_grp;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
@@ -742,6 +761,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc);
@@ -848,6 +868,11 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
struct nix_txsch *txsch, bool enable);
+void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc);
+int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx);
+int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx, u16 mcam_index);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
@@ -896,6 +921,10 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, bool enable);
+u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index);
+void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u64 cfg);
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
@@ -921,6 +950,8 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
u64 bcast_mcast_val, u64 bcast_mcast_mask);
void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
+int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr);
+void npc_mcam_rsrcs_deinit(struct rvu *rvu);
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
@@ -942,6 +973,7 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+void rvu_apr_block_cn10k_init(struct rvu *rvu);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 0e74c5a2231e..7fa98aeb3663 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -559,3 +559,12 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
cfg |= BIT_ULL(1) | BIT_ULL(2);
rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
}
+
+void rvu_apr_block_cn10k_init(struct rvu *rvu)
+{
+ u64 reg;
+
+ reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ reg |= FIELD_PREP(LMTST_THROTTLE_MASK, LMTST_WR_PEND_MAX);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index bd817ee88735..e6d7914ce61c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1825,6 +1825,8 @@ static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
@@ -1836,6 +1838,16 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
cq_ctx->bpid, cq_ctx->bp_ena);
+ if (!is_rvu_otx2(rvu)) {
+ seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
+ seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
+ seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
+ seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
+ cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
+ cq_ctx->lbpid_low);
+ seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
+ }
+
seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
cq_ctx->update_time, cq_ctx->avg_level);
seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
@@ -1847,6 +1859,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
cq_ctx->qsize, cq_ctx->caching);
seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
cq_ctx->substream, cq_ctx->ena);
+ if (!is_rvu_otx2(rvu)) {
+ seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
+ seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
+ cq_ctx->cpt_drop_err_en);
+ }
seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
cq_ctx->drop_ena, cq_ctx->drop);
seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
@@ -2889,6 +2906,14 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
rule->mask.mpls_lse[3]);
break;
+ case NPC_TYPE_ICMP:
+ seq_printf(s, "%d ", rule->packet.icmp_type);
+ seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
+ break;
+ case NPC_CODE_ICMP:
+ seq_printf(s, "%d ", rule->packet.icmp_code);
+ seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
+ break;
default:
seq_puts(s, "\n");
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 21b5d71c1e37..1e6fbd98423d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -5,7 +5,7 @@
*
*/
-#include<linux/bitfield.h>
+#include <linux/bitfield.h>
#include "rvu.h"
#include "rvu_reg.h"
@@ -1237,6 +1237,7 @@ enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
@@ -1354,6 +1355,79 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink
return 0;
}
+static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu);
+
+ return 0;
+}
+
+static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ npc_mcam_rsrcs_deinit(rvu);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ block->lf.max = ctx->val.vu16;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_mcam_rsrcs_init(rvu, blkaddr);
+
+ return 0;
+}
+
+static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u16 max_nix0_lf, max_nix1_lf;
+ struct npc_mcam *mcam;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
+ max_nix0_lf = cfg & 0xFFF;
+ cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
+ max_nix1_lf = cfg & 0xFFF;
+
+ /* Do not allow the user to modify the maximum NIX LFs once MCAM
+ * entries have already been assigned.
+ */
+ mcam = &rvu->hw->mcam;
+ if (mcam->bmap_fcnt < mcam->bmap_entries) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mcam entries have already been assigned, can't resize");
+ return -EPERM;
+ }
+
+ if (max_nix0_lf && val.vu16 > max_nix0_lf) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "requested nixlf is greater than the max supported nix0_lf");
+ return -EPERM;
+ }
+
+ if (max_nix1_lf && val.vu16 > max_nix1_lf) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "requested nixlf is greater than the max supported nix1_lf");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct devlink_param rvu_af_dl_params[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
"dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
@@ -1375,6 +1449,12 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = {
rvu_af_dl_npc_mcam_high_zone_percent_get,
rvu_af_dl_npc_mcam_high_zone_percent_set,
rvu_af_dl_npc_mcam_high_zone_percent_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
+ "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_nix_maxlf_get,
+ rvu_af_dl_nix_maxlf_set,
+ rvu_af_dl_nix_maxlf_validate),
};
/* Devlink switch mode */
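For context, the nix_maxlf validate callback above derives the hardware LF limit from the low 12 bits of NIX_AF_CONST2. A minimal sketch of that bounds check, with an illustrative mask macro (the patch open-codes 0xFFF):

#include <linux/bitfield.h>

#define NIX_LF_CNT_MASK	GENMASK_ULL(11, 0)	/* illustrative name */

static int nix_check_maxlf_example(u64 nix_af_const2, u16 requested)
{
	u16 max_lf = FIELD_GET(NIX_LF_CNT_MASK, nix_af_const2);

	/* A zero limit is treated as "no such block"; otherwise reject
	 * values above what the hardware advertises.
	 */
	if (max_lf && requested > max_lf)
		return -EINVAL;

	return 0;
}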
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 58744313f0eb..66203a90f052 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -72,12 +72,19 @@ enum nix_makr_fmt_indexes {
/* For now considering MC resources needed for broadcast
* pkt replication only. i.e 256 HWVFs + 12 PFs.
*/
-#define MC_TBL_SIZE MC_TBL_SZ_512
-#define MC_BUF_CNT MC_BUF_CNT_128
+#define MC_TBL_SIZE MC_TBL_SZ_2K
+#define MC_BUF_CNT MC_BUF_CNT_1024
+
+#define MC_TX_MAX 2048
struct mce {
struct hlist_node node;
+ u32 rq_rss_index;
u16 pcifunc;
+ u16 channel;
+ u8 dest_type;
+ u8 is_active;
+ u8 reserved[2];
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
@@ -165,18 +172,33 @@ static void nix_mce_list_init(struct nix_mce_list *list, int max)
list->max = max;
}
-static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
{
+ struct rsrc_bmap *mce_counter;
int idx;
if (!mcast)
- return 0;
+ return -EINVAL;
- idx = mcast->next_free_mce;
- mcast->next_free_mce += count;
+ mce_counter = &mcast->mce_counter[dir];
+ if (!rvu_rsrc_check_contig(mce_counter, count))
+ return -ENOSPC;
+
+ idx = rvu_alloc_rsrc_contig(mce_counter, count);
return idx;
}
+static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
+{
+ struct rsrc_bmap *mce_counter;
+
+ if (!mcast)
+ return;
+
+ mce_counter = &mcast->mce_counter[dir];
+ rvu_free_rsrc_contig(mce_counter, count, start);
+}
+
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
int nix_blkaddr = 0, i = 0;
@@ -2956,7 +2978,8 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
}
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
- int mce, u8 op, u16 pcifunc, int next, bool eol)
+ int mce, u8 op, u16 pcifunc, int next,
+ int index, u8 mce_op, bool eol)
{
struct nix_aq_enq_req aq_req;
int err;
@@ -2967,8 +2990,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
aq_req.qidx = mce;
/* Use RSS with RSS index 0 */
- aq_req.mce.op = 1;
- aq_req.mce.index = 0;
+ aq_req.mce.op = mce_op;
+ aq_req.mce.index = index;
aq_req.mce.eol = eol;
aq_req.mce.pf_func = pcifunc;
aq_req.mce.next = next;
@@ -2985,6 +3008,206 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
+{
+ struct hlist_node *tmp;
+ struct mce *mce;
+
+ /* Scan through the current list */
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ }
+
+ mce_list->count = 0;
+ mce_list->max = 0;
+}
+
+static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
+{
+ return elem->mce_start_index + elem->mcast_mce_list.count - 1;
+}
+
+static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem)
+{
+ int idx, last_idx, next_idx, err;
+ struct nix_mce_list *mce_list;
+ struct mce *mce, *prev_mce;
+
+ mce_list = &elem->mcast_mce_list;
+ idx = elem->mce_start_index;
+ last_idx = nix_get_last_mce_list_index(elem);
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (idx > last_idx)
+ break;
+
+ if (!mce->is_active) {
+ if (idx == elem->mce_start_index) {
+ idx++;
+ prev_mce = mce;
+ elem->mce_start_index = idx;
+ continue;
+ } else if (idx == last_idx) {
+ err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
+ prev_mce->pcifunc, next_idx,
+ prev_mce->rq_rss_index,
+ prev_mce->dest_type,
+ false);
+ if (err)
+ return err;
+
+ break;
+ }
+ }
+
+ next_idx = idx + 1;
+ /* EOL should be set in last MCE */
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ mce->rq_rss_index, mce->dest_type,
+ (next_idx > last_idx) ? true : false);
+ if (err)
+ return err;
+
+ idx++;
+ prev_mce = mce;
+ }
+
+ return 0;
+}
+
+static void nix_update_egress_mce_list_hw(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem)
+{
+ struct nix_mce_list *mce_list;
+ int idx, last_idx, next_idx;
+ struct mce *mce, *prev_mce;
+ u64 regval;
+ u8 eol;
+
+ mce_list = &elem->mcast_mce_list;
+ idx = elem->mce_start_index;
+ last_idx = nix_get_last_mce_list_index(elem);
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (idx > last_idx)
+ break;
+
+ if (!mce->is_active) {
+ if (idx == elem->mce_start_index) {
+ idx++;
+ prev_mce = mce;
+ elem->mce_start_index = idx;
+ continue;
+ } else if (idx == last_idx) {
+ regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
+ rvu_write64(rvu, nix_hw->blkaddr,
+ NIX_AF_TX_MCASTX(idx - 1),
+ regval);
+ break;
+ }
+ }
+
+ eol = 0;
+ next_idx = idx + 1;
+ /* EOL should be set in last MCE */
+ if (next_idx > last_idx)
+ eol = 1;
+
+ regval = (next_idx << 16) | (eol << 12) | mce->channel;
+ rvu_write64(rvu, nix_hw->blkaddr,
+ NIX_AF_TX_MCASTX(idx),
+ regval);
+ idx++;
+ prev_mce = mce;
+ }
+}
+
+static int nix_del_mce_list_entry(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem,
+ struct nix_mcast_grp_update_req *req)
+{
+ u32 num_entry = req->num_mce_entry;
+ struct nix_mce_list *mce_list;
+ struct mce *mce;
+ bool is_found;
+ int i;
+
+ mce_list = &elem->mcast_mce_list;
+ for (i = 0; i < num_entry; i++) {
+ is_found = false;
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ /* If already exists, then delete */
+ if (mce->pcifunc == req->pcifunc[i]) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ is_found = true;
+ break;
+ }
+ }
+
+ if (!is_found)
+ return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
+ }
+
+ mce_list->max = mce_list->count;
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+ return 0;
+}
+
+static int nix_add_mce_list_entry(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem,
+ struct nix_mcast_grp_update_req *req)
+{
+ u32 num_entry = req->num_mce_entry;
+ struct nix_mce_list *mce_list;
+ struct hlist_node *tmp;
+ struct mce *mce;
+ int i;
+
+ mce_list = &elem->mcast_mce_list;
+ for (i = 0; i < num_entry; i++) {
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+ if (!mce)
+ goto free_mce;
+
+ mce->pcifunc = req->pcifunc[i];
+ mce->channel = req->channel[i];
+ mce->rq_rss_index = req->rq_rss_index[i];
+ mce->dest_type = req->dest_type[i];
+ mce->is_active = 1;
+ hlist_add_head(&mce->node, &mce_list->head);
+ mce_list->count++;
+ }
+
+ mce_list->max += num_entry;
+
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+ return 0;
+
+free_mce:
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ }
+
+ return -ENOMEM;
+}
+
static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
u16 pcifunc, bool add)
{
@@ -3080,6 +3303,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
/* EOL should be set in last MCE */
err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
mce->pcifunc, next_idx,
+ 0, 1,
(next_idx > last_idx) ? true : false);
if (err)
goto end;
@@ -3160,6 +3384,16 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
return err;
}
+static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
+{
+ struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
+
+ INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
+ mutex_init(&mcast_grp->mcast_grp_lock);
+ mcast_grp->next_grp_index = 1;
+ mcast_grp->count = 0;
+}
+
static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
struct nix_mcast *mcast = &nix_hw->mcast;
@@ -3184,15 +3418,15 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
continue;
/* save start idx of broadcast mce list */
- pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
/* save start idx of multicast mce list */
- pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
/* save the start idx of promisc mce list */
- pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
for (idx = 0; idx < (numvfs + 1); idx++) {
@@ -3207,7 +3441,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->bcast_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
@@ -3215,7 +3449,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->mcast_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
@@ -3223,7 +3457,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->promisc_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
}
@@ -3238,13 +3472,30 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
int err, size;
size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
- size = (1ULL << size);
+ size = BIT_ULL(size);
+
+ /* Allocate bitmap for rx mce entries */
+ mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
+ err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ if (err)
+ return -ENOMEM;
+
+ /* Allocate bitmap for tx mce entries */
+ mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
+ err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ return -ENOMEM;
+ }
/* Alloc memory for multicast/mirror replication entries */
err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
- (256UL << MC_TBL_SIZE), size);
- if (err)
+ mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
return -ENOMEM;
+ }
rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
(u64)mcast->mce_ctx->iova);
@@ -3257,8 +3508,11 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
(8UL << MC_BUF_CNT), size);
- if (err)
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
return -ENOMEM;
+ }
rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
(u64)mcast->mcast_buf->iova);
@@ -3272,6 +3526,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
mutex_init(&mcast->mce_lock);
+ nix_setup_mcast_grp(nix_hw);
+
return nix_setup_mce_tables(rvu, nix_hw);
}
@@ -4698,6 +4954,74 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
+static void nix_mcast_update_action(struct rvu *rvu,
+ struct nix_mcast_grp_elem *elem)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action rx_action = { 0 };
+ struct nix_tx_action tx_action = { 0 };
+ int npc_blkaddr;
+
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (elem->dir == NIX_MCAST_INGRESS) {
+ *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index);
+ rx_action.index = elem->mce_start_index;
+ npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
+ *(u64 *)&rx_action);
+ } else {
+ *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index);
+ tx_action.index = elem->mce_start_index;
+ npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
+ *(u64 *)&tx_action);
+ }
+}
+
+static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
+ struct nix_mce_list *mce_list;
+ struct mce *mce;
+
+ /* Iterate the group elements and update the active state of the
+ * element matching the given pcifunc.
+ */
+ mce_list = &elem->mcast_mce_list;
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (mce->pcifunc == pcifunc) {
+ mce->is_active = is_active;
+ break;
+ }
+ }
+
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+ else
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+
+ /* Update the multicast index in NPC rule */
+ nix_mcast_update_action(rvu, elem);
+ }
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+}
+
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -4709,6 +5033,9 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
if (err)
return err;
+ /* Enable the interface if it is in any multicast list */
+ nix_mcast_update_mce_entry(rvu, pcifunc, 1);
+
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
npc_mcam_enable_flows(rvu, pcifunc);
@@ -4733,6 +5060,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ /* Disable the interface if it is in any multicast list */
+ nix_mcast_update_mce_entry(rvu, pcifunc, 0);
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
@@ -5707,3 +6037,361 @@ int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *re
return 0;
}
+
+static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
+ u32 mcast_grp_idx)
+{
+ struct nix_mcast_grp_elem *iter;
+ bool is_found = false;
+
+ list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
+ if (iter->mcast_grp_idx == mcast_grp_idx) {
+ is_found = true;
+ break;
+ }
+ }
+
+ if (is_found)
+ return iter;
+
+ return NULL;
+}
+
+int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, ret;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
+ if (!elem)
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ else
+ ret = elem->mce_start_index;
+
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+ return ret;
+}
+
+void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_mcast_grp_destroy_req dreq = { 0 };
+ struct nix_mcast_grp_update_req ureq = { 0 };
+ struct nix_mcast_grp_update_rsp ursp = { 0 };
+ struct nix_mcast_grp_elem *elem, *tmp;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
+ struct nix_mce_list *mce_list;
+ struct hlist_node *tmp;
+ struct mce *mce;
+
+ /* If the pcifunc which created the multicast/mirror
+ * group received an FLR, then delete the entire group.
+ */
+ if (elem->pcifunc == pcifunc) {
+ /* Delete group */
+ dreq.hdr.pcifunc = elem->pcifunc;
+ dreq.mcast_grp_idx = elem->mcast_grp_idx;
+ dreq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
+ continue;
+ }
+
+ /* Iterate the group elements and delete the element which
+ * received the FLR.
+ */
+ mce_list = &elem->mcast_mce_list;
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ if (mce->pcifunc == pcifunc) {
+ ureq.hdr.pcifunc = pcifunc;
+ ureq.num_mce_entry = 1;
+ ureq.mcast_grp_idx = elem->mcast_grp_idx;
+ ureq.op = NIX_MCAST_OP_DEL_ENTRY;
+ ureq.pcifunc[0] = pcifunc;
+ ureq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+}
+
+int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx, u16 mcam_index)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, ret = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
+ if (!elem)
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ else
+ elem->mcam_index = mcam_index;
+
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+ return ret;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
+ struct nix_mcast_grp_create_req *req,
+ struct nix_mcast_grp_create_rsp *rsp)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, err;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
+ elem->mcam_index = -1;
+ elem->mce_start_index = -1;
+ elem->pcifunc = req->hdr.pcifunc;
+ elem->dir = req->dir;
+ elem->mcast_grp_idx = mcast_grp->next_grp_index++;
+
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
+ mcast_grp->count++;
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ rsp->mcast_grp_idx = elem->mcast_grp_idx;
+ return 0;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
+ struct nix_mcast_grp_destroy_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ int blkaddr, err, ret = 0;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ /* If the AF is requesting the deletion,
+ * then the AF already holds the lock.
+ */
+ if (!req->is_af)
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
+ if (!elem) {
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ goto unlock_grp;
+ }
+
+ /* If no mce entries are associated with the group
+ * then just remove it from the global list.
+ */
+ if (!elem->mcast_mce_list.count)
+ goto delete_grp;
+
+ /* Delete the associated mcam entry and
+ * remove all mce entries from the group
+ */
+ mcast = &nix_hw->mcast;
+ mutex_lock(&mcast->mce_lock);
+ if (elem->mcam_index != -1) {
+ uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
+ uninstall_req.entry = elem->mcam_index;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ }
+
+ nix_free_mce_list(mcast, elem->mcast_mce_list.count,
+ elem->mce_start_index, elem->dir);
+ nix_delete_mcast_mce_list(&elem->mcast_mce_list);
+ mutex_unlock(&mcast->mce_lock);
+
+delete_grp:
+ list_del(&elem->list);
+ kfree(elem);
+ mcast_grp->count--;
+
+unlock_grp:
+ if (!req->is_af)
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ return ret;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
+ struct nix_mcast_grp_update_req *req,
+ struct nix_mcast_grp_update_rsp *rsp)
+{
+ struct nix_mcast_grp_destroy_req dreq = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ int blkaddr, err, npc_blkaddr;
+ u16 prev_count, new_count;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ int i, ret;
+
+ if (!req->num_mce_entry)
+ return 0;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ /* If the AF is requesting the update,
+ * then the AF already holds the lock.
+ */
+ if (!req->is_af)
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
+ if (!elem) {
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ goto unlock_grp;
+ }
+
+ /* If any pcifunc matches the group's pcifunc, then we can
+ * delete the entire group.
+ */
+ if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
+ for (i = 0; i < req->num_mce_entry; i++) {
+ if (elem->pcifunc == req->pcifunc[i]) {
+ /* Delete group */
+ dreq.hdr.pcifunc = elem->pcifunc;
+ dreq.mcast_grp_idx = elem->mcast_grp_idx;
+ dreq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
+ ret = 0;
+ goto unlock_grp;
+ }
+ }
+ }
+
+ mcast = &nix_hw->mcast;
+ mutex_lock(&mcast->mce_lock);
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
+
+ prev_count = elem->mcast_mce_list.count;
+ if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
+ new_count = prev_count + req->num_mce_entry;
+ if (prev_count)
+ nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
+
+ elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
+
+ /* It is possible not to get contiguous memory */
+ if (elem->mce_start_index < 0) {
+ if (elem->mcam_index != -1) {
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+ ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
+ goto unlock_mce;
+ }
+ }
+
+ ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
+ if (ret) {
+ nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
+ if (prev_count)
+ elem->mce_start_index = nix_alloc_mce_list(mcast,
+ prev_count,
+ elem->dir);
+
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+
+ goto unlock_mce;
+ }
+ } else {
+ if (!prev_count || prev_count < req->num_mce_entry) {
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+ ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
+ goto unlock_mce;
+ }
+
+ nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
+ new_count = prev_count - req->num_mce_entry;
+ elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
+ ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
+ if (ret) {
+ nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
+ elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index,
+ true);
+
+ goto unlock_mce;
+ }
+ }
+
+ if (elem->mcam_index == -1) {
+ rsp->mce_start_index = elem->mce_start_index;
+ ret = 0;
+ goto unlock_mce;
+ }
+
+ nix_mcast_update_action(rvu, elem);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
+ rsp->mce_start_index = elem->mce_start_index;
+ ret = 0;
+
+unlock_mce:
+ mutex_unlock(&mcast->mce_lock);
+
+unlock_grp:
+ if (!req->is_af)
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ return ret;
+}
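Taken together, the three handlers above implement a create -> update -> destroy lifecycle for a multicast/mirror group. A hedged sketch of how an in-kernel caller might drive that sequence, using only the request/response fields these handlers consume (error handling trimmed; struct layouts assumed to match the mbox definitions added by this series):

static int nix_mcast_grp_example(struct rvu *rvu, u16 pcifunc, u16 channel)
{
	struct nix_mcast_grp_create_req creq = { 0 };
	struct nix_mcast_grp_create_rsp crsp = { 0 };
	struct nix_mcast_grp_update_req ureq = { 0 };
	struct nix_mcast_grp_update_rsp ursp = { 0 };
	struct nix_mcast_grp_destroy_req dreq = { 0 };
	int err;

	/* 1. Create an ingress multicast/mirror group */
	creq.hdr.pcifunc = pcifunc;
	creq.dir = NIX_MCAST_INGRESS;
	err = rvu_mbox_handler_nix_mcast_grp_create(rvu, &creq, &crsp);
	if (err)
		return err;

	/* 2. Add one MCE entry pointing at this pcifunc/channel */
	ureq.hdr.pcifunc = pcifunc;
	ureq.mcast_grp_idx = crsp.mcast_grp_idx;
	ureq.op = NIX_MCAST_OP_ADD_ENTRY;
	ureq.num_mce_entry = 1;
	ureq.pcifunc[0] = pcifunc;
	ureq.channel[0] = channel;
	ureq.dest_type[0] = NIX_RX_RSS;
	ureq.rq_rss_index[0] = 0;
	err = rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
	if (err)
		return err;

	/* 3. Tear the group down again */
	dreq.hdr.pcifunc = pcifunc;
	dreq.mcast_grp_idx = crsp.mcast_grp_idx;
	return rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
}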
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 0bcf3e559280..516adb50f9f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
return 0;
}
-static int npc_mcam_verify_pf_func(struct rvu *rvu,
- struct mcam_entry *entry_data, u8 intf,
- u16 pcifunc)
-{
- u16 pf_func, pf_func_mask;
-
- if (is_npc_intf_rx(intf))
- return 0;
-
- pf_func_mask = (entry_data->kw_mask[0] >> 32) &
- NPC_KEX_PF_FUNC_MASK;
- pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
-
- pf_func = be16_to_cpu((__force __be16)pf_func);
- if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
- ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
- (pcifunc & ~RVU_PFVF_FUNC_MASK)))
- return -EINVAL;
-
- return 0;
-}
-
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -437,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
return;
}
+ /* AF modifies the given action only if the PF/VF has requested it */
+ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
+ return;
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -595,8 +577,8 @@ static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
}
-static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index)
+u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
{
int bank = npc_get_bank(mcam, index);
@@ -605,6 +587,16 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}
+void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u64 cfg)
+{
+ int bank = npc_get_bank(mcam, index);
+
+ index &= (mcam->banksize - 1);
+ return rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank), cfg);
+}
+
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
@@ -1836,7 +1828,21 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}
-static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
+void npc_mcam_rsrcs_deinit(struct rvu *rvu)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ bitmap_free(mcam->bmap);
+ bitmap_free(mcam->bmap_reverse);
+ kfree(mcam->entry2pfvf_map);
+ kfree(mcam->cntr2pfvf_map);
+ kfree(mcam->entry2cntr_map);
+ kfree(mcam->cntr_refcnt);
+ kfree(mcam->entry2target_pffunc);
+ kfree(mcam->counters.bmap);
+}
+
+int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
@@ -1880,24 +1886,22 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
/* Allocate bitmaps for managing MCAM entries */
- mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap)
return -ENOMEM;
- mcam->bmap_reverse = devm_kcalloc(rvu->dev,
- BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap_reverse)
- return -ENOMEM;
+ goto free_bmap;
mcam->bmap_fcnt = mcam->bmap_entries;
/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
- mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
+
if (!mcam->entry2pfvf_map)
- return -ENOMEM;
+ goto free_bmap_reverse;
/* Reserve 1/8th of MCAM entries at the bottom for low priority
* allocations and another 1/8th at the top for high priority
@@ -1916,31 +1920,31 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
*/
err = rvu_alloc_bitmap(&mcam->counters);
if (err)
- return err;
+ goto free_entry_map;
- mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr2pfvf_map)
- goto free_mem;
+ goto free_cntr_bmap;
/* Alloc memory for MCAM entry to counter mapping and for tracking
* counter's reference count.
*/
- mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->entry2cntr_map)
- goto free_mem;
+ goto free_cntr_map;
- mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr_refcnt)
- goto free_mem;
+ goto free_entry_cntr_map;
/* Alloc memory for saving target device of mcam rule */
- mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2target_pffunc = kmalloc_array(mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
if (!mcam->entry2target_pffunc)
- goto free_mem;
+ goto free_cntr_refcnt;
for (index = 0; index < mcam->bmap_entries; index++) {
mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
@@ -1954,8 +1958,21 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
return 0;
-free_mem:
+free_cntr_refcnt:
+ kfree(mcam->cntr_refcnt);
+free_entry_cntr_map:
+ kfree(mcam->entry2cntr_map);
+free_cntr_map:
+ kfree(mcam->cntr2pfvf_map);
+free_cntr_bmap:
kfree(mcam->counters.bmap);
+free_entry_map:
+ kfree(mcam->entry2pfvf_map);
+free_bmap_reverse:
+ bitmap_free(mcam->bmap_reverse);
+free_bmap:
+ bitmap_free(mcam->bmap);
+
return -ENOMEM;
}
@@ -2163,6 +2180,7 @@ void rvu_npc_freemem(struct rvu *rvu)
struct npc_mcam *mcam = &rvu->hw->mcam;
kfree(pkind->rsrc.bmap);
+ npc_mcam_rsrcs_deinit(rvu);
kfree(mcam->counters.bmap);
if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr);
@@ -2678,18 +2696,17 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->entry = NPC_MCAM_ENTRY_INVALID;
rsp->free_count = 0;
- /* Check if ref_entry is within range */
- if (req->priority && req->ref_entry >= mcam->bmap_entries) {
- dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
- __func__, req->ref_entry);
- return NPC_MCAM_INVALID_REQ;
- }
+ /* If ref_entry is greater than the range,
+ * clamp it to the maximum value.
+ */
+ if (req->ref_entry > mcam->bmap_entries)
+ req->ref_entry = mcam->bmap_entries;
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
*/
if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
- ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+ ((req->ref_entry == mcam->bmap_entries) &&
req->priority == NPC_MCAM_LOWER_PRIO))
return NPC_MCAM_INVALID_REQ;
@@ -2816,12 +2833,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (!is_pffunc_af(pcifunc) &&
- npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
- rc = NPC_MCAM_INVALID_REQ;
- goto exit;
- }
-
/* For AF installed rules, the nix_intf should be set to target NIX */
if (is_pffunc_af(req->hdr.pcifunc))
nix_intf = req->intf;
@@ -3173,10 +3184,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_MCAM_INVALID_REQ;
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- req->hdr.pcifunc))
- return NPC_MCAM_INVALID_REQ;
-
/* Try to allocate a MCAM entry */
entry_req.hdr.pcifunc = req->hdr.pcifunc;
entry_req.contig = true;
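With npc_mcam_rsrcs_init() now using plain kcalloc()/bitmap_zalloc() so the maps can be torn down and rebuilt at runtime, each allocation is paired with a dedicated unwind label. A generic sketch of that staged goto-unwind idiom, with purely illustrative names:

struct example_maps {
	unsigned long *bmap;
	u16 *entry_map;
	u16 *refcnt;
};

static int example_maps_init(struct example_maps *m, unsigned int nentries)
{
	m->bmap = bitmap_zalloc(nentries, GFP_KERNEL);
	if (!m->bmap)
		return -ENOMEM;

	m->entry_map = kcalloc(nentries, sizeof(u16), GFP_KERNEL);
	if (!m->entry_map)
		goto free_bmap;

	m->refcnt = kcalloc(nentries, sizeof(u16), GFP_KERNEL);
	if (!m->refcnt)
		goto free_entry_map;

	return 0;

free_entry_map:
	kfree(m->entry_map);
free_bmap:
	bitmap_free(m->bmap);
	return -ENOMEM;
}

/* The matching deinit simply frees everything, as npc_mcam_rsrcs_deinit()
 * does above.
 */
static void example_maps_deinit(struct example_maps *m)
{
	kfree(m->refcnt);
	kfree(m->entry_map);
	bitmap_free(m->bmap);
}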
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 114e4ec21802..c75669c8fde7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -51,6 +51,8 @@ static const char * const npc_flow_names[] = {
[NPC_MPLS3_TTL] = "lse depth 3 ttl",
[NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
[NPC_MPLS4_TTL] = "lse depth 4",
+ [NPC_TYPE_ICMP] = "icmp type",
+ [NPC_CODE_ICMP] = "icmp code",
[NPC_UNKNOWN] = "unknown",
};
@@ -526,6 +528,8 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
+ NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
@@ -555,7 +559,7 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 *features = &mcam->rx_features;
- u64 tcp_udp_sctp;
+ u64 proto_flags;
int hdr;
if (is_npc_intf_tx(intf))
@@ -566,18 +570,21 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
*features |= BIT_ULL(hdr);
}
- tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ proto_flags = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
- BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
+ BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
/* for tcp/udp/sctp corresponding layer type should be in the key */
- if (*features & tcp_udp_sctp) {
+ if (*features & proto_flags) {
if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
- *features &= ~tcp_udp_sctp;
+ *features &= ~proto_flags;
else
*features |= BIT_ULL(NPC_IPPROTO_TCP) |
BIT_ULL(NPC_IPPROTO_UDP) |
- BIT_ULL(NPC_IPPROTO_SCTP);
+ BIT_ULL(NPC_IPPROTO_SCTP) |
+ BIT_ULL(NPC_IPPROTO_ICMP);
}
/* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */
@@ -971,6 +978,10 @@ do { \
ntohs(mask->sport), 0);
NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_TYPE_ICMP, icmp_type, pkt->icmp_type, 0,
+ mask->icmp_type, 0);
+ NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
+ mask->icmp_code, 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
@@ -1106,13 +1117,40 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
}
}
-static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
- struct mcam_entry *entry,
- struct npc_install_flow_req *req,
- u16 target, bool pf_set_vfs_mac)
+static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
+ u64 op, void *action)
+{
+ int mce_index;
+
+ /* If a PF/VF is installing a multicast rule, it is expected to have
+ * already created a group for the multicast/mirror list; otherwise
+ * reject the configuration. In this case, req->index carries the
+ * multicast/mirror group index.
+ */
+ if (req->hdr.pcifunc &&
+ (op == NIX_RX_ACTIONOP_MCAST || op == NIX_TX_ACTIONOP_MCAST)) {
+ mce_index = rvu_nix_mcast_get_mce_index(rvu, req->hdr.pcifunc, req->index);
+ if (mce_index < 0)
+ return mce_index;
+
+ if (op == NIX_RX_ACTIONOP_MCAST)
+ ((struct nix_rx_action *)action)->index = mce_index;
+ else
+ ((struct nix_tx_action *)action)->index = mce_index;
+ }
+
+ return 0;
+}
+
+static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
{
struct rvu_switch *rswitch = &rvu->rswitch;
struct nix_rx_action action;
+ int ret;
if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
req->chan_mask = 0x0; /* Do not care channel */
@@ -1124,6 +1162,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target;
action.op = req->op;
action.index = req->index;
+
+ ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
+ if (ret)
+ return ret;
+
action.match_id = req->match_id;
action.flow_key_alg = req->flow_key_alg;
@@ -1155,14 +1198,17 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+
+ return 0;
}
-static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
- struct mcam_entry *entry,
- struct npc_install_flow_req *req, u16 target)
+static int npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
{
struct nix_tx_action action;
u64 mask = ~0ULL;
+ int ret;
/* If AF is installing then do not care about
* PF_FUNC in Send Descriptor
@@ -1176,6 +1222,11 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
*(u64 *)&action = 0x00;
action.op = req->op;
action.index = req->index;
+
+ ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
+ if (ret)
+ return ret;
+
action.match_id = req->match_id;
entry->action = *(u64 *)&action;
@@ -1191,6 +1242,8 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+
+ return 0;
}
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
@@ -1220,10 +1273,15 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
req->intf, blkaddr);
- if (is_npc_intf_rx(req->intf))
- npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
- else
- npc_update_tx_entry(rvu, pfvf, entry, req, target);
+ if (is_npc_intf_rx(req->intf)) {
+ err = npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
+ if (err)
+ return err;
+ } else {
+ err = npc_update_tx_entry(rvu, pfvf, entry, req, target);
+ if (err)
+ return err;
+ }
/* Default unicast rules do not exist for TX */
if (is_npc_intf_tx(req->intf))
@@ -1340,6 +1398,10 @@ find_rule:
return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
req->index, req->match_id);
+ if (owner && req->op == NIX_RX_ACTIONOP_MCAST)
+ return rvu_nix_mcast_update_mcam_entry(rvu, req->hdr.pcifunc,
+ req->index, entry_index);
+
return 0;
}
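The two NPC_SCAN_HDR() entries above describe one-byte fields at offsets 0 and 1 of the ICMP header, which matches the standard struct icmphdr layout. A trivial sketch of that layout for reference:

#include <linux/icmp.h>

/* type sits at byte offset 0 and code at byte offset 1 of the ICMP header,
 * exactly the offsets/lengths the NPC_SCAN_HDR() lines encode.
 */
static void icmp_hdr_offsets_example(const struct icmphdr *icmp,
				     u8 *type, u8 *code)
{
	*type = icmp->type;
	*code = icmp->code;
}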
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 18c1c9f361cc..6f73ad9807f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -734,5 +734,7 @@
#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23
#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22
#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21
+#define LMTST_THROTTLE_MASK GENMASK_ULL(38, 35)
+#define LMTST_WR_PEND_MAX 15
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index edc9367b1b95..5ef406c7e8a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -340,11 +340,12 @@ struct nix_aq_res_s {
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
- u64 rsvd_64_67 : 4;
+ u64 lbp_ena : 1;
+ u64 lbpid_low : 3;
u64 bp_ena : 1;
- u64 rsvd_69_71 : 3;
+ u64 lbpid_med : 3;
u64 bpid : 9;
- u64 rsvd_81_83 : 3;
+ u64 lbpid_high : 3;
u64 qint_idx : 7;
u64 cq_err : 1;
u64 cint_idx : 7;
@@ -358,10 +359,14 @@ struct nix_cq_ctx_s {
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
- u64 rsvd_210_211 : 2;
- u64 substream : 20;
+ u64 cpt_drop_err_en : 1;
+ u64 rsvd_211 : 1;
+ u64 substream : 12;
+ u64 stash_thresh : 4;
+ u64 lbp_frac : 4;
u64 caching : 1;
- u64 rsvd_233_235 : 3;
+ u64 stashing : 1;
+ u64 rsvd_234_235 : 2;
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 7ca6941ea0b9..02d0b707aea5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
- if (err)
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
return err;
+ }
}
sq->head = 0;
@@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
+ return err;
+ }
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 53f6258a973c..7f786de61014 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev,
pfvf->hw.tx_queues = channel->tx_count;
if (pfvf->xdp_prog)
pfvf->hw.xdp_queues = channel->rx_count;
- pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
if (if_up)
err = dev->netdev_ops->ndo_open(dev);
@@ -835,21 +834,26 @@ static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
return 0;
}
-/* RSS context configuration */
-static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc,
- u32 *rss_context, bool delete)
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
int ret, idx;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
- *rss_context >= MAX_RSS_GROUPS)
+ if (rxfh->rss_context)
+ rss_context = rxfh->rss_context;
+
+ if (rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ rss_context >= MAX_RSS_GROUPS)
return -EINVAL;
rss = &pfvf->hw.rss_info;
@@ -859,40 +863,45 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
return -EIO;
}
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
+ if (rxfh->key) {
+ memcpy(rss->key, rxfh->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
}
- if (delete)
- return otx2_rss_ctx_delete(pfvf, *rss_context);
+ if (rxfh->rss_delete)
+ return otx2_rss_ctx_delete(pfvf, rss_context);
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, &rss_context);
+ rxfh->rss_context = rss_context;
if (ret)
return ret;
}
- if (indir) {
- rss_ctx = rss->rss_ctx[*rss_context];
+ if (rxfh->indir) {
+ rss_ctx = rss->rss_ctx[rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = rxfh->indir[idx];
}
- otx2_set_rss_table(pfvf, *rss_context);
+ otx2_set_rss_table(pfvf, rss_context);
return 0;
}
-static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc, u32 rss_context)
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
+ u32 *indir = rxfh->indir;
int idx, rx_queues;
rss = &pfvf->hw.rss_info;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->rss_context)
+ rss_context = rxfh->rss_context;
if (!indir)
return 0;
@@ -914,30 +923,12 @@ static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
for (idx = 0; idx < rss->rss_size; idx++)
indir[idx] = rss_ctx->ind_tbl[idx];
}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->key, sizeof(rss->key));
return 0;
}
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
-{
- return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
- DEFAULT_RSS_CONTEXT_GROUP);
-}
-
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
-{
-
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
-
- return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
-}
-
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -1318,6 +1309,7 @@ static void otx2_get_fec_stats(struct net_device *netdev,
}
static const struct ethtool_ops otx2_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1340,8 +1332,6 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
- .get_rxfh_context = otx2_get_rxfh_context,
- .set_rxfh_context = otx2_set_rxfh_context,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -1441,6 +1431,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1459,8 +1450,6 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
- .get_rxfh_context = otx2_get_rxfh_context,
- .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
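The conversion above moves otx2 to the consolidated ethtool_rxfh_param API, where the indirection table, key, hash function and RSS context all arrive in one structure. A minimal, hedged sketch of a get_rxfh implementation against that API (example_priv and its fields are illustrative, not part of this driver):

struct example_priv {
	u32 ind_tbl[128];
	u8 rss_key[40];
	u32 rss_size;
};

static int example_get_rxfh(struct net_device *dev,
			    struct ethtool_rxfh_param *rxfh)
{
	struct example_priv *priv = netdev_priv(dev);

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->indir)
		memcpy(rxfh->indir, priv->ind_tbl,
		       priv->rss_size * sizeof(u32));

	if (rxfh->key)
		memcpy(rxfh->key, priv->rss_key, sizeof(priv->rss_key));

	return 0;
}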
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index a57455aebff6..e5fe67e73865 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1744,6 +1744,7 @@ int otx2_open(struct net_device *netdev)
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
+ pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues;
pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
pf->hw.tc_tx_queues);
@@ -2643,8 +2644,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
xdp_features_clear_redirect_target(dev);
}
- pf->hw.non_qos_queues += pf->hw.xdp_queues;
-
if (if_up)
otx2_open(pf->netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index db1e0e0e812d..4fd44b6eecea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -29,6 +29,8 @@
#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4)
+#define MCAST_INVALID_GRP (-1U)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -47,6 +49,7 @@ struct otx2_tc_flow {
bool is_act_police;
u32 prio;
struct npc_install_flow_req req;
+ u32 mcast_grp_idx;
u64 rate;
u32 burst;
bool is_pps;
@@ -355,22 +358,96 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return rc;
}
+static int otx2_tc_update_mcast(struct otx2_nic *nic,
+ struct npc_install_flow_req *req,
+ struct netlink_ext_ack *extack,
+ struct otx2_tc_flow *node,
+ struct nix_mcast_grp_update_req *ureq,
+ u8 num_intf)
+{
+ struct nix_mcast_grp_update_req *grp_update_req;
+ struct nix_mcast_grp_create_req *creq;
+ struct nix_mcast_grp_create_rsp *crsp;
+ u32 grp_index;
+ int rc;
+
+ mutex_lock(&nic->mbox.lock);
+ creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
+ if (!creq) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ creq->dir = NIX_MCAST_INGRESS;
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
+ goto error;
+ }
+
+ crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0,
+ &creq->hdr);
+ if (IS_ERR(crsp)) {
+ rc = PTR_ERR(crsp);
+ goto error;
+ }
+
+ grp_index = crsp->mcast_grp_idx;
+ grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
+ if (!grp_update_req) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ ureq->op = NIX_MCAST_OP_ADD_ENTRY;
+ ureq->mcast_grp_idx = grp_index;
+ ureq->num_mce_entry = num_intf;
+ ureq->pcifunc[0] = nic->pcifunc;
+ ureq->channel[0] = nic->hw.tx_chan_base;
+
+ ureq->dest_type[0] = NIX_RX_RSS;
+ ureq->rq_rss_index[0] = 0;
+ memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));
+
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+ goto error;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ req->op = NIX_RX_ACTIONOP_MCAST;
+ req->index = grp_index;
+ node->mcast_grp_idx = grp_index;
+ return 0;
+
+error:
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
struct npc_install_flow_req *req,
struct flow_cls_offload *f,
struct otx2_tc_flow *node)
{
+ struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
struct netlink_ext_ack *extack = f->common.extack;
+ bool pps = false, mcast = false;
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
- bool pps = false;
+ u8 num_intf = 1;
+ int err, i;
u64 rate;
- int err;
- int i;
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
@@ -442,11 +519,30 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
req->index = act->rx_queue;
break;
+ case FLOW_ACTION_MIRRED_INGRESS:
+ target = act->dev;
+ priv = netdev_priv(target);
+ dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
+ dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
+ dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
+ dummy_grp_update_req.rq_rss_index[num_intf] = 0;
+ mcast = true;
+ num_intf++;
+ break;
+
default:
return -EOPNOTSUPP;
}
}
+ if (mcast) {
+ err = otx2_tc_update_mcast(nic, req, extack, node,
+ &dummy_grp_update_req,
+ num_intf);
+ if (err)
+ return err;
+ }
+
if (nr_police > 1) {
NL_SET_ERR_MSG_MOD(extack,
"rate limit police offload requires a single action");
@@ -541,6 +637,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -815,6 +912,19 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+
+ flow_rule_match_icmp(rule, &match);
+
+ flow_spec->icmp_type = match.key->type;
+ flow_mask->icmp_type = match.mask->type;
+ req->features |= BIT_ULL(NPC_TYPE_ICMP);
+
+ flow_spec->icmp_code = match.key->code;
+ flow_mask->icmp_code = match.mask->code;
+ req->features |= BIT_ULL(NPC_CODE_ICMP);
+ }
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
@@ -1052,6 +1162,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct nix_mcast_grp_destroy_req *grp_destroy_req;
struct otx2_tc_flow *flow_node;
int err;
@@ -1085,6 +1196,15 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
mutex_unlock(&nic->mbox.lock);
}
+ /* Remove the multicast/mirror related nodes */
+ if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
+ mutex_lock(&nic->mbox.lock);
+ grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
+ grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
+ otx2_sync_mbox_msg(&nic->mbox);
+ mutex_unlock(&nic->mbox.lock);
+ }
+
free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
@@ -1124,6 +1244,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
new_node->prio = tc_flow_cmd->common.prio;
+ new_node->mcast_grp_idx = MCAST_INVALID_GRP;
memset(&dummy, 0, sizeof(struct npc_install_flow_req));
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 4d519ea833b2..f828d32737af 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1403,7 +1403,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
bool *need_xdp_flush)
{
- unsigned char *hard_start, *data;
+ unsigned char *hard_start;
int qidx = cq->cq_idx;
struct xdp_buff xdp;
struct page *page;
@@ -1417,9 +1417,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
- data = (unsigned char *)phys_to_virt(pa);
- hard_start = page_address(page);
- xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ hard_start = (unsigned char *)phys_to_virt(pa);
+ xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
cqe->sg.seg_size, false);
act = bpf_prog_run_xdp(prog, &xdp);
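The change above hands xdp_prepare_buff() a fixed headroom (OTX2_HEAD_ROOM) rather than deriving it from the buffer addresses. A short sketch of the xdp_prepare_buff() contract for reference:

#include <net/xdp.h>

/* After xdp_prepare_buff(), xdp->data points at hard_start + headroom and
 * xdp->data_end at xdp->data + len; metadata support is disabled here.
 */
static void xdp_buff_layout_example(struct xdp_buff *xdp,
				    unsigned char *hard_start,
				    int headroom, int len)
{
	xdp_prepare_buff(xdp, hard_start, headroom, len, false);
}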
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3cf6589cfdac..de123350bd46 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1159,15 +1159,18 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
+ dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
struct mtk_tx_dma_v2 *txd;
txd = eth->scratch_ring + i * soc->txrx.txd_size;
- txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ txd->txd1 = addr;
if (i < cnt - 1)
txd->txd2 = eth->phy_scratch_ring +
(i + 1) * soc->txrx.txd_size;
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+ txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
txd->txd4 = 0;
if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
@@ -4758,7 +4761,10 @@ static int mtk_probe(struct platform_device *pdev)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
+ if (!err)
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
if (err) {
dev_err(&pdev->dev, "Wrong DMA config\n");
return -EINVAL;
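The mtk_eth_soc.c probe change splits the single 36-bit streaming+coherent mask into a 36-bit streaming mask and a 32-bit coherent mask: descriptor rings allocated coherently must stay below 4 GiB while streamed packet buffers may use the full 36-bit range. A minimal sketch of the pattern for a hypothetical probe path:

	/* Sketch: different streaming vs. coherent DMA masks at probe time. */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "failed to configure DMA masks\n");
		return err;
	}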
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 9a6744c0d458..c895e265ae0e 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -670,7 +670,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
void *buf;
int s;
- page = __dev_alloc_pages(GFP_KERNEL, 0);
+ page = __dev_alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
@@ -691,10 +691,11 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
struct mtk_wdma_desc *desc = desc_ptr;
+ u32 ctrl;
desc->buf0 = cpu_to_le32(buf_phys);
if (!mtk_wed_is_v3_or_greater(dev->hw)) {
- u32 txd_size, ctrl;
+ u32 txd_size;
txd_size = dev->wlan.init_buf(buf, buf_phys,
token++);
@@ -708,11 +709,11 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
MTK_WED_BUF_SIZE - txd_size);
- desc->ctrl = cpu_to_le32(ctrl);
desc->info = 0;
} else {
- desc->ctrl = cpu_to_le32(token << 16);
+ ctrl = token << 16 | TX_DMA_PREP_ADDR64(buf_phys);
}
+ desc->ctrl = cpu_to_le32(ctrl);
desc_ptr += desc_size;
buf += MTK_WED_BUF_SIZE;
@@ -811,6 +812,7 @@ mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
buf_phys = page_phys;
for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
desc->buf0 = cpu_to_le32(buf_phys);
+ desc->token = cpu_to_le32(RX_DMA_PREP_ADDR64(buf_phys));
buf_phys += MTK_WED_PAGE_BUF_SIZE;
desc++;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index ae44ad5f8ce8..d58b07e7e123 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -142,7 +142,8 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
dma_addr_t addr;
void *buf;
- buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
+ buf = page_frag_alloc(&q->cache, q->buf_size,
+ GFP_ATOMIC | GFP_DMA32);
if (!buf)
break;
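The mtk_wed_wo.c refill change adds GFP_DMA32 so the page-fragment allocator only returns memory the WED microcontroller can address with 32-bit DMA. A hedged sketch of the shape of such a refill loop, with dev standing in for the mapping device; the mapping calls are the usual streaming-DMA helpers, not copied from this driver:

	/* Sketch: allocate a fragment from 32-bit addressable memory,
	 * then map it for device access; bail out of the refill loop on
	 * either failure.
	 */
	buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC | GFP_DMA32);
	if (!buf)
		break;

	addr = dma_map_single(dev, buf, q->buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr))) {
		skb_free_frag(buf);
		break;
	}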
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 164a13272faa..619e1c3ef7f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1258,8 +1258,8 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
return -EINVAL;
}
-static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
- u8 *hfunc)
+static int mlx4_en_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
@@ -1269,19 +1269,19 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
rss_rings = rounddown_pow_of_two(rss_rings);
for (i = 0; i < n; i++) {
- if (!ring_index)
+ if (!rxfh->indir)
break;
- ring_index[i] = i % rss_rings;
+ rxfh->indir[i] = i % rss_rings;
}
- if (key)
- memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
- if (hfunc)
- *hfunc = priv->rss_hash_fn;
+ if (rxfh->key)
+ memcpy(rxfh->key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
+ rxfh->hfunc = priv->rss_hash_fn;
return 0;
}
-static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
- const u8 *key, const u8 hfunc)
+static int mlx4_en_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
@@ -1295,12 +1295,12 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
* between rings
*/
for (i = 0; i < n; i++) {
- if (!ring_index)
+ if (!rxfh->indir)
break;
- if (i > 0 && !ring_index[i] && !rss_rings)
+ if (i > 0 && !rxfh->indir[i] && !rss_rings)
rss_rings = i;
- if (ring_index[i] != (i % (rss_rings ?: n)))
+ if (rxfh->indir[i] != (i % (rss_rings ?: n)))
return -EINVAL;
}
@@ -1311,8 +1311,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
if (!is_power_of_2(rss_rings))
return -EINVAL;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
- err = mlx4_en_check_rxfh_func(dev, hfunc);
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ err = mlx4_en_check_rxfh_func(dev, rxfh->hfunc);
if (err)
return err;
}
@@ -1323,12 +1323,12 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
mlx4_en_stop_port(dev, 1);
}
- if (ring_index)
+ if (rxfh->indir)
priv->prof->rss_rings = rss_rings;
- if (key)
- memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE)
- priv->rss_hash_fn = hfunc;
+ if (rxfh->key)
+ memcpy(priv->rss_key, rxfh->key, MLX4_EN_RSS_KEY_SIZE);
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->rss_hash_fn = rxfh->hfunc;
if (port_up) {
err = mlx4_en_start_port(dev);
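The two mlx4 hunks above convert the RSS callbacks to the consolidated ethtool_rxfh_param interface, where the indirection table, hash key and hash function travel in one structure instead of separate arguments. A hedged skeleton of the new callback shape; struct my_priv, my_fill_indir() and my_apply_rss() are placeholders, not mlx4 code:

	/* Skeleton: ethtool_rxfh_param-based RSS callbacks. */
	static int my_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
	{
		struct my_priv *priv = netdev_priv(dev);

		if (rxfh->indir)
			my_fill_indir(priv, rxfh->indir);
		if (rxfh->key)
			memcpy(rxfh->key, priv->rss_key, sizeof(priv->rss_key));
		rxfh->hfunc = priv->rss_hfunc;
		return 0;
	}

	static int my_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
			       struct netlink_ext_ack *extack)
	{
		struct my_priv *priv = netdev_priv(dev);

		if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
		    rxfh->hfunc != ETH_RSS_HASH_TOP)
			return -EOPNOTSUPP;

		return my_apply_rss(priv, rxfh->indir, rxfh->key);
	}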
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a7b1f9686c09..4957412ff1f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
{
const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats;
+ unsigned long flags;
if (!err || !(strcmp(namep, "unknown command opcode")))
return;
@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats = xa_load(&dev->cmd.stats, opcode);
if (!stats)
return;
- spin_lock_irq(&stats->lock);
+ spin_lock_irqsave(&stats->lock, flags);
stats->failed++;
if (err < 0)
stats->last_failed_errno = -err;
@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats->last_failed_mbox_status = status;
stats->last_failed_syndrome = syndrome;
}
- spin_unlock_irq(&stats->lock);
+ spin_unlock_irqrestore(&stats->lock, flags);
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
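The cmd.c hunk switches the stats lock from spin_lock_irq() to spin_lock_irqsave() because cmd_status_log() can be reached from contexts where interrupts are already disabled; unconditionally re-enabling them on unlock would clobber the caller's IRQ state. A minimal sketch of the idiom:

	/* Sketch: use irqsave/irqrestore when the caller's IRQ state is unknown. */
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);
	stats->failed++;			/* update counters under the lock */
	spin_unlock_irqrestore(&stats->lock, flags);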
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3e064234f6fe..98d4306929f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+ !dev->priv.fw_reset) {
+ NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+ return -EOPNOTSUPP;
+ }
+
if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
index 28d02749d3c4..7659ad21e6e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
@@ -55,7 +55,10 @@ int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data)
ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET,
MLX5_VSC_LOCK);
if (ret) {
- mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");
+ if (ret == -EBUSY)
+ mlx5_core_info(dev, "SW reset semaphore is already in use\n");
+ else
+ mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");
goto unlock_gw;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 2cd81bb32c66..d74a5aaf4268 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -36,11 +36,17 @@ static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
return 0;
}
+struct mlx5_dpll_synce_status {
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ bool ho_acq;
+ bool oper_freq_measure;
+ s32 frequency_diff;
+};
+
static int
mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
- enum mlx5_msees_admin_status *admin_status,
- enum mlx5_msees_oper_status *oper_status,
- bool *ho_acq)
+ struct mlx5_dpll_synce_status *synce_status)
{
u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
@@ -50,11 +56,11 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
MLX5_REG_MSEES, 0, 0);
if (err)
return err;
- if (admin_status)
- *admin_status = MLX5_GET(msees_reg, out, admin_status);
- *oper_status = MLX5_GET(msees_reg, out, oper_status);
- if (ho_acq)
- *ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ synce_status->admin_status = MLX5_GET(msees_reg, out, admin_status);
+ synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
+ synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
+ synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
return 0;
}
@@ -67,21 +73,23 @@ mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
MLX5_SET(msees_reg, in, field_select,
MLX5_MSEES_FIELD_SELECT_ENABLE |
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE |
MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
MLX5_SET(msees_reg, in, admin_status, admin_status);
+ MLX5_SET(msees_reg, in, admin_freq_measure, true);
return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
MLX5_REG_MSEES, 0, 1);
}
static enum dpll_lock_status
-mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
+mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
{
- switch (oper_status) {
+ switch (synce_status->oper_status) {
case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
fallthrough;
case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
- return ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
- DPLL_LOCK_STATUS_LOCKED;
+ return synce_status->ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
+ DPLL_LOCK_STATUS_LOCKED;
case MLX5_MSEES_OPER_STATUS_HOLDOVER:
fallthrough;
case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
@@ -92,31 +100,37 @@ mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
}
static enum dpll_pin_state
-mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status,
- enum mlx5_msees_oper_status oper_status)
+mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
- return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
- (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
- oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
+ return (synce_status->admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
+ (synce_status->oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
+ synce_status->oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
}
+static int
+mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
+ s64 *ffo)
+{
+ if (!synce_status->oper_freq_measure)
+ return -ENODATA;
+ *ffo = synce_status->frequency_diff;
+ return 0;
+}
+
static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
void *priv,
enum dpll_lock_status *status,
struct netlink_ext_ack *extack)
{
- enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = priv;
- bool ho_acq;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL,
- &oper_status, &ho_acq);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
return err;
-
- *status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ *status = mlx5_dpll_lock_status_get(&synce_status);
return 0;
}
@@ -128,18 +142,9 @@ static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
return 0;
}
-static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll,
- void *priv,
- enum dpll_mode mode,
- struct netlink_ext_ack *extack)
-{
- return mode == DPLL_MODE_MANUAL;
-}
-
static const struct dpll_device_ops mlx5_dpll_device_ops = {
.lock_status_get = mlx5_dpll_device_lock_status_get,
.mode_get = mlx5_dpll_device_mode_get,
- .mode_supported = mlx5_dpll_device_mode_supported,
};
static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
@@ -160,16 +165,14 @@ static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
enum dpll_pin_state *state,
struct netlink_ext_ack *extack)
{
- enum mlx5_msees_admin_status admin_status;
- enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = pin_priv;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
- &oper_status, NULL);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
return err;
- *state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ *state = mlx5_dpll_pin_state_get(&synce_status);
return 0;
}
@@ -188,10 +191,25 @@ static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
}
+static int mlx5_dpll_ffo_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack)
+{
+ struct mlx5_dpll_synce_status synce_status;
+ struct mlx5_dpll *mdpll = pin_priv;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
+ if (err)
+ return err;
+ return mlx5_dpll_pin_ffo_get(&synce_status, ffo);
+}
+
static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
.direction_get = mlx5_dpll_pin_direction_get,
.state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
.state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
+ .ffo_get = mlx5_dpll_ffo_get,
};
static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
@@ -211,19 +229,16 @@ static void mlx5_dpll_periodic_work(struct work_struct *work)
{
struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
work.work);
- enum mlx5_msees_admin_status admin_status;
- enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll_synce_status synce_status;
enum dpll_lock_status lock_status;
enum dpll_pin_state pin_state;
- bool ho_acq;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
- &oper_status, &ho_acq);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
goto err_out;
- lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
- pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ lock_status = mlx5_dpll_lock_status_get(&synce_status);
+ pin_state = mlx5_dpll_pin_state_get(&synce_status);
if (!mdpll->last.valid)
goto invalid_out;
@@ -246,7 +261,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
{
if (mdpll->tracking_netdev)
return;
- netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
mdpll->tracking_netdev = netdev;
}
@@ -254,7 +269,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
{
if (!mdpll->tracking_netdev)
return;
- netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ dpll_netdev_pin_clear(mdpll->tracking_netdev);
mdpll->tracking_netdev = NULL;
}
@@ -374,7 +389,7 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev)
struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
struct mlx5_core_dev *mdev = mdpll->mdev;
- cancel_delayed_work(&mdpll->work);
+ cancel_delayed_work_sync(&mdpll->work);
mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
destroy_workqueue(mdpll->wq);
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
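The dpll.c changes gather the MSEES register fields into struct mlx5_dpll_synce_status, so the new frequency-measurement fields can be read in one register access and shared by every helper, and the remove path now uses cancel_delayed_work_sync() so the periodic work cannot still be executing while the workqueue and pin are torn down. A minimal sketch of that teardown ordering:

	/* Sketch: stop deferred work before freeing what it touches. */
	cancel_delayed_work_sync(&mdpll->work);	/* waits for a running instance */
	destroy_workqueue(mdpll->wq);
	/* only now is it safe to unregister/free objects the work used */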
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 729a11b5fb25..55c6ace0acd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -72,7 +72,6 @@ struct page_pool;
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
-#define MLX5E_MAX_NUM_TC 8
#define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE
#define MLX5_RX_HEADROOM NET_SKB_PAD
@@ -364,7 +363,7 @@ struct mlx5e_cq {
/* control */
struct net_device *netdev;
struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
+ struct workqueue_struct *workqueue;
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
@@ -484,10 +483,12 @@ struct mlx5e_xdp_info_fifo {
struct mlx5e_xdpsq;
struct mlx5e_xmit_data;
+struct xsk_tx_metadata;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
- int);
+ int,
+ struct xsk_tx_metadata *);
struct mlx5e_xdpsq {
/* data path */
@@ -756,7 +757,7 @@ struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
struct mlx5e_xdpsq rq_xdpsq;
- struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq sq[MLX5_MAX_NUM_TC];
struct mlx5e_icosq icosq; /* internal control operations */
struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
bool xdp;
@@ -806,7 +807,7 @@ struct mlx5e_channels {
struct mlx5e_channel_stats {
struct mlx5e_ch_stats ch;
- struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
struct mlx5e_rq_stats xskrq;
struct mlx5e_xdpsq_stats rq_xdpsq;
@@ -816,8 +817,8 @@ struct mlx5e_channel_stats {
struct mlx5e_ptp_stats {
struct mlx5e_ch_stats ch;
- struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
- struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC];
+ struct mlx5e_ptp_cq_stats cq[MLX5_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;
@@ -885,7 +886,6 @@ struct mlx5e_priv {
struct mlx5e_rq drop_rq;
struct mlx5e_channels channels;
- u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
@@ -983,6 +983,8 @@ struct mlx5e_profile {
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
int (*max_nch_limit)(struct mlx5_core_dev *mdev);
+ u32 (*get_tisn)(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv,
+ u8 lag_port, u8 tc);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
@@ -990,6 +992,11 @@ struct mlx5e_profile {
u32 features;
};
+u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
+ struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
+ u8 lag_port, u8 tc);
+
#define mlx5e_profile_feature_cap(profile, feature) \
((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))
@@ -1037,6 +1044,8 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
struct mlx5e_create_cq_param {
+ struct net_device *netdev;
+ struct workqueue_struct *wq;
struct napi_struct *napi;
struct mlx5e_ch_stats *ch_stats;
int node;
@@ -1044,7 +1053,7 @@ struct mlx5e_create_cq_param {
};
struct mlx5e_cq_param;
-int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
@@ -1115,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb);
@@ -1131,8 +1140,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
-int mlx5e_create_tises(struct mlx5e_priv *priv);
-void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
@@ -1174,9 +1181,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
-int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
- const u8 hfunc);
+int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
+int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index e1283531e0b8..671adbad0a40 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
+ ft->g = NULL;
kvfree(in);
return -ENOMEM;
}
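The fs_tt_redirect.c fix clears ft->g after freeing it on the error path: the flow table's later teardown also frees ft->g, so a stale pointer would be freed twice. A minimal sketch of the idiom, relying on kfree(NULL) being a no-op:

	/* Sketch: poison freed pointers that outlive the error path. */
	if (!in || !ft->g) {
		kfree(ft->g);
		ft->g = NULL;	/* later cleanup may kfree(ft->g) again; NULL is safe */
		kvfree(in);
		return -ENOMEM;
	}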
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 254c84739046..40c8df111754 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -36,7 +36,7 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
return true;
}
-void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
index e1ac4b3d22fb..6beba7f075c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
@@ -7,6 +7,5 @@
int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv);
-void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv);
#endif /* __MLX5_MONITOR_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index e097f336e1c4..5d213a9886f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -669,6 +669,8 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
*ccp = (struct mlx5e_create_cq_param) {
+ .netdev = c->netdev,
+ .wq = c->priv->wq,
.napi = &c->napi,
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
@@ -1062,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
- allow_swp =
- mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
+ allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index af3928eddafd..ca05b3252a1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
WARN_ON_ONCE(tracker->inuse);
tracker->inuse = true;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_add_tail(&tracker->entry, &list->tracker_list_head);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
WARN_ON_ONCE(!tracker->inuse);
tracker->inuse = false;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_del(&tracker->entry);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
- spin_lock(&cqe_list->tracker_list_lock);
+ spin_lock_bh(&cqe_list->tracker_list_lock);
list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
struct sk_buff *skb =
mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
pos->inuse = false;
list_del(&pos->entry);
}
- spin_unlock(&cqe_list->tracker_list_lock);
+ spin_unlock_bh(&cqe_list->tracker_list_lock);
}
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
- md_buff[*md_buff_sz++] = metadata_id;
+ md_buff[(*md_buff_sz)++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
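The one-line ptp.c change fixes an operator-precedence bug: *md_buff_sz++ dereferences the old pointer value and then advances the pointer, while the intent was to increment the counter the pointer refers to. A small stand-alone C example of the difference (illustrative, not driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned char count = 0;
		unsigned char *p = &count;

		/* (*p)++ increments the value p points at; p stays put. */
		(*p)++;
		printf("count=%d\n", count);		/* prints 1 */

		/* *p++ reads through p, then advances the pointer itself. */
		unsigned char buf[2] = { 7, 8 };
		unsigned char *q = buf;
		unsigned char v = *q++;
		printf("v=%d, q now points at %d\n", v, *q);	/* v=7, *q=8 */
		return 0;
	}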
@@ -518,9 +518,11 @@ static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
for (tc = 0; tc < num_tc; tc++) {
int txq_ix = ix_base + tc;
+ u32 tisn;
- err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- cparams, tc, &c->ptpsq[tc]);
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, tc);
+ err = mlx5e_ptp_open_txqsq(c, tisn, txq_ix, cparams, tc, &c->ptpsq[tc]);
if (err)
goto close_txqsq;
}
@@ -555,6 +557,8 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
num_tc = mlx5e_get_dcb_num_tc(params);
+ ccp.netdev = c->netdev;
+ ccp.wq = c->priv->wq;
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
@@ -565,7 +569,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;
- err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
if (err)
goto out_err_txqsq_cq;
}
@@ -574,7 +578,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];
- err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
if (err)
goto out_err_ts_cq;
@@ -602,6 +606,8 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
struct mlx5e_cq_param *cq_param;
struct mlx5e_cq *cq = &c->rq.cq;
+ ccp.netdev = c->netdev;
+ ccp.wq = c->priv->wq;
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
@@ -609,7 +615,7 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
cq_param = &cparams->rq_param.cqp;
- return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ return mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
}
static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 7b700d0f956a..86f1854698b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -49,7 +49,7 @@ enum {
struct mlx5e_ptp {
/* data path */
- struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_ptpsq ptpsq[MLX5_MAX_NUM_TC];
struct mlx5e_rq rq;
struct napi_struct napi;
struct device *pdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 244bc15a42ab..34adf8c3f81a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -77,6 +77,7 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
struct mlx5e_params *params;
struct mlx5e_channel *c;
struct mlx5e_txqsq *sq;
+ u32 tisn;
params = &chs->params;
@@ -123,11 +124,13 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
memset(&param_cq, 0, sizeof(param_cq));
mlx5e_build_sq_param(priv->mdev, params, &param_sq);
mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
- err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
- err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, hw_id,
+
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, 0);
+ err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id,
priv->htb_qos_sq_stats[node_qid]);
if (err)
goto err_close_cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index b12fe3c5a258..a55452c69f06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -147,6 +147,20 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
}
}
+static void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ u64 dbytes;
+ u64 dpkts;
+
+ dpkts = priv->stats.rep_stats.vport_rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
+ dbytes = priv->stats.rep_stats.vport_rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
+ mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats, &priv->stats.rep_stats);
+ flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
+ FLOW_ACTION_HW_STATS_DELAYED);
+}
+
static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 368a95fa77d3..b14cd62edffc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -48,7 +48,8 @@ mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
- u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
+ u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? TCA_PEDIT_KEY_EX_CMD_SET :
+ TCA_PEDIT_KEY_EX_CMD_ADD;
u8 htype = act->mangle.htype;
int err = -EOPNOTSUPP;
u32 mask, val, offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 86bf007fd05b..b500cc2c9689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 5620d9f97518..ac458a8d10e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -68,11 +68,13 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
node = dev_to_node(mdev->device);
+ ccp.netdev = priv->netdev;
+ ccp.wq = priv->wq;
ccp.node = node;
ccp.ch_stats = t->stats;
ccp.napi = &t->napi;
ccp.ix = 0;
- err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
+ err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 13c7ed1bb37e..82b5ca1be4f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -103,7 +103,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd->dma_addr = dma_addr;
if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
return false;
/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
@@ -145,7 +145,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd->dma_addr = dma_addr;
if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
return false;
/* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
@@ -256,9 +256,55 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
return 0;
}
+static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+ const struct mlx5_cqe64 *cqe = _ctx->cqe;
+
+ if (!cqe_has_vlan(cqe))
+ return -ENODATA;
+
+ *vlan_proto = htons(ETH_P_8021Q);
+ *vlan_tci = be16_to_cpu(cqe->vlan_info);
+ return 0;
+}
+
const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
.xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
.xmo_rx_hash = mlx5e_xdp_rx_hash,
+ .xmo_rx_vlan_tag = mlx5e_xdp_rx_vlan_tag,
+};
+
+struct mlx5e_xsk_tx_complete {
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_cq *cq;
+};
+
+static u64 mlx5e_xsk_fill_timestamp(void *_priv)
+{
+ struct mlx5e_xsk_tx_complete *priv = _priv;
+ u64 ts;
+
+ ts = get_cqe_ts(priv->cqe);
+
+ if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
+ return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
+
+ return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
+}
+
+static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
+{
+ struct mlx5_wqe_eth_seg *eseg = priv;
+
+ /* HW/FW is doing parsing, so offsets are largely ignored. */
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+}
+
+const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops = {
+ .tmo_fill_timestamp = mlx5e_xsk_fill_timestamp,
+ .tmo_request_checksum = mlx5e_xsk_request_checksum,
};
/* returns true if packet was consumed by xdp */
@@ -398,11 +444,11 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result);
+ int check_result, struct xsk_tx_metadata *meta);
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result)
+ int check_result, struct xsk_tx_metadata *meta)
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -420,7 +466,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
*/
if (unlikely(sq->mpwqe.wqe))
mlx5e_xdp_mpwqe_complete(sq);
- return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
+ return mlx5e_xmit_xdp_frame(sq, xdptxd, 0, meta);
}
if (!xdptxd->len) {
skb_frag_t *frag = &xdptxdf->sinfo->frags[0];
@@ -450,6 +496,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
* and it's safe to complete it at any time.
*/
mlx5e_xdp_mpwqe_session_start(sq);
+ xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, &session->wqe->eth);
}
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
@@ -480,7 +527,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result)
+ int check_result, struct xsk_tx_metadata *meta)
{
struct mlx5e_xmit_data_frags *xdptxdf =
container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
@@ -601,6 +648,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
sq->pc++;
}
+ xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
+
sq->doorbell_cseg = cseg;
stats->xmit++;
@@ -610,7 +659,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
u32 *xsk_frames,
- struct xdp_frame_bulk *bq)
+ struct xdp_frame_bulk *bq,
+ struct mlx5e_cq *cq,
+ struct mlx5_cqe64 *cqe)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
u16 i;
@@ -670,10 +721,24 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
break;
}
- case MLX5E_XDP_XMIT_MODE_XSK:
+ case MLX5E_XDP_XMIT_MODE_XSK: {
/* AF_XDP send */
+ struct xsk_tx_metadata_compl *compl = NULL;
+ struct mlx5e_xsk_tx_complete priv = {
+ .cqe = cqe,
+ .cq = cq,
+ };
+
+ if (xp_tx_metadata_enabled(sq->xsk_pool)) {
+ xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ compl = &xdpi.xsk_meta;
+
+ xsk_tx_metadata_complete(compl, &mlx5e_xsk_tx_metadata_ops, &priv);
+ }
+
(*xsk_frames)++;
break;
+ }
default:
WARN_ON_ONCE(true);
}
@@ -722,7 +787,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, cq, cqe);
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
@@ -769,7 +834,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
sq->cc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, NULL, NULL);
}
xdp_flush_frame_bulk(&bq);
@@ -842,7 +907,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL);
if (unlikely(!ret)) {
int j;
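The en/xdp.c changes thread an xsk_tx_metadata pointer through the XDP transmit paths and register tmo_fill_timestamp/tmo_request_checksum callbacks, so AF_XDP sockets with TX metadata enabled can request checksum offload at transmit time and read a completion timestamp back. A hedged sketch of the request/complete pairing around one descriptor, using the callbacks defined above; priv_ctx and compl stand in for the driver's completion bookkeeping:

	/* Sketch: AF_XDP TX metadata plumbing for one descriptor. */
	meta = xsk_buff_get_metadata(pool, desc.addr);

	/* TX path: let the core program requested offloads into the WQE */
	xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);

	/* Completion path: fill the timestamp and finish the user's request */
	xsk_tx_metadata_complete(&compl, &mlx5e_xsk_tx_metadata_ops, &priv_ctx);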
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index ecfe93a479da..e054db1e10f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -33,6 +33,7 @@
#define __MLX5_EN_XDP_H__
#include <linux/indirect_call_wrapper.h>
+#include <net/xdp_sock.h>
#include "en.h"
#include "en/txrx.h"
@@ -82,7 +83,7 @@ enum mlx5e_xdp_xmit_mode {
* num, page_1, page_2, ... , page_num.
*
* MLX5E_XDP_XMIT_MODE_XSK:
- * none.
+ * frame.xsk_meta.
*/
#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4
@@ -97,6 +98,7 @@ union mlx5e_xdp_info {
u8 num;
struct page *page;
} page;
+ struct xsk_tx_metadata_compl xsk_meta;
};
struct mlx5e_xsk_param;
@@ -112,13 +114,16 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
+extern const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops;
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- int check_result));
+ int check_result,
+ struct xsk_tx_metadata *meta));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- int check_result));
+ int check_result,
+ struct xsk_tx_metadata *meta));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 36826b582484..82e6abbc1734 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -127,7 +127,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
- err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
if (unlikely(err))
goto err_free_cparam;
@@ -136,7 +136,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_close_rx_cq;
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xsksq.cq);
if (unlikely(err))
goto err_close_rq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 597f319d4770..a59199ed590d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -55,12 +55,16 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, *xdpi);
+ if (xp_tx_metadata_enabled(sq->xsk_pool))
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .xsk_meta = {} });
sq->doorbell_cseg = &nopwqe->ctrl;
}
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
struct xsk_buff_pool *pool = sq->xsk_pool;
+ struct xsk_tx_metadata *meta = NULL;
union mlx5e_xdp_info xdpi;
bool work_done = true;
bool flush = false;
@@ -93,12 +97,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr);
xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr);
xdptxd.len = desc.len;
+ meta = xsk_buff_get_metadata(pool, desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd,
- check_result);
+ check_result, meta);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
@@ -106,6 +111,16 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xsk_tx_post_err(sq, &xdpi);
} else {
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+ if (xp_tx_metadata_enabled(sq->xsk_pool)) {
+ struct xsk_tx_metadata_compl compl;
+
+ xsk_tx_metadata_to_compl(meta, &compl);
+ XSK_TX_COMPL_FITS(void *);
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info)
+ { .xsk_meta = compl });
+ }
}
flush = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 161c5190c236..05612d9c6080 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
+ attrs->dir = x->xso.dir;
+
/* esn */
if (x->props.flags & XFRM_STATE_ESN) {
attrs->replay_esn.trigger = true;
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ goto skip_replay_window;
+
switch (x->replay_esn->replay_window) {
case 32:
attrs->replay_esn.replay_window =
@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
}
}
- attrs->dir = x->xso.dir;
+skip_replay_window:
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+ if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
+ x->replay_esn->replay_window != 32 &&
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index d4ebd8743114..b2cabd6ab86c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
- struct mlx5e_macsec_sa *sa,
- bool is_tx, struct net_device *netdev, u32 fs_id)
+static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
{
int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
fs_id);
- mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
sa->macsec_rule = NULL;
}
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
+{
+ mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+}
+
+static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa, bool encrypt,
+ bool is_tx, u32 *fs_id)
+{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ union mlx5_macsec_rule *macsec_rule;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
+ if (!macsec_rule)
+ return -ENOMEM;
+
+ sa->macsec_rule = macsec_rule;
+
+ return 0;
+}
+
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
bool encrypt, bool is_tx, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
- struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
- union mlx5_macsec_rule *macsec_rule;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
if (err)
return err;
- rule_attrs.macsec_obj_id = sa->macsec_obj_id;
- rule_attrs.sci = sa->sci;
- rule_attrs.assoc_num = sa->assoc_num;
- rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
- MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
-
- macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
- if (!macsec_rule) {
- err = -ENOMEM;
- goto destroy_macsec_object;
+ if (sa->active) {
+ err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
+ if (err)
+ goto destroy_macsec_object;
}
- sa->macsec_rule = macsec_rule;
-
return 0;
destroy_macsec_object:
@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
goto destroy_sa;
macsec_device->tx_sa[assoc_num] = tx_sa;
- if (!secy->operational ||
- assoc_num != tx_sc->encoding_sa ||
- !tx_sa->active)
+ if (!secy->operational)
goto out;
err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
if (ctx_tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
} else {
@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
out:
mutex_unlock(&macsec->lock);
@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ if (rx_sa->active)
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[assoc_num] = NULL;
@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
if (!rx_sa || !rx_sa->macsec_rule)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
}
}
@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
- &rx_sc->sc_xarray_element->fs_id);
+ err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
continue;
if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index bb7f86c993e5..e66f486faafe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in || !ft->g) {
- kfree(ft->g);
- kvfree(in);
+ if (!ft->g)
return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free_g;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
switch (type) {
@@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
memset(in, 0, inlen);
@@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
kvfree(in);
return 0;
-err:
+err_clean_group:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
-out:
+err_free_in:
kvfree(in);
-
+err_free_g:
+ kfree(ft->g);
+ ft->g = NULL;
return err;
}
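The en_arfs.c rewrite replaces the single combined allocation check with the usual kernel goto ladder, so each failure point unwinds exactly what was set up before it. A compact sketch of the idiom with placeholder sizes and labels:

	/* Sketch: goto-ladder error handling, unwinding in reverse order. */
	ft->g = kcalloc(NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free_g;
	}

	/* ... create groups, jumping to err_free_in on any failure ... */

	kvfree(in);
	return 0;

	err_free_in:
		kvfree(in);
	err_free_g:
		kfree(ft->g);
		ft->g = NULL;
		return err;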
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 41c396e76457..6ed3a32b7e22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -74,7 +74,73 @@ int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
return err;
}
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
+{
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+
+ if (mlx5_lag_is_lacp_owner(mdev))
+ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
+ return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
+{
+ mlx5_core_destroy_tis(mdev, tisn);
+}
+
+static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC])
+{
+ int tc, i;
+
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
+ for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
+ mlx5e_destroy_tis(mdev, tisn[i][tc]);
+}
+
+static bool mlx5_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
+}
+
+static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC])
+{
+ int tc, i;
+ int err;
+
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
+ for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
+
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, prio, tc << 1);
+
+ if (mlx5_lag_should_assign_affinity(mdev))
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
+
+ err = mlx5e_create_tis(mdev, in, &tisn[i][tc]);
+ if (err)
+ goto err_close_tises;
+ }
+ }
+
+ return 0;
+
+err_close_tises:
+ for (; i >= 0; i--) {
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(mdev, tisn[i][tc]);
+ tc = MLX5_MAX_NUM_TC;
+ }
+
+ return err;
+}
+
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
@@ -103,6 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
+ if (create_tises) {
+ err = mlx5e_create_tises(mdev, res->tisn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
+ goto err_destroy_bfreg;
+ }
+ res->tisn_valid = true;
+ }
+
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
@@ -115,6 +190,8 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
return 0;
+err_destroy_bfreg:
+ mlx5_free_bfreg(mdev, &res->bfreg);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
@@ -130,6 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
+ if (res->tisn_valid)
+ mlx5e_destroy_tises(mdev, res->tisn);
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c7c1b667b105..cc51ce16df14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1262,27 +1262,29 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u32 rss_context = rxfh->rss_context;
int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc);
+ err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
+ rxfh->indir, rxfh->key, &rxfh->hfunc);
mutex_unlock(&priv->state_lock);
return err;
}
-static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc,
- u32 *rss_context, bool delete)
+int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ u32 *rss_context = &rxfh->rss_context;
+ u8 hfunc = rxfh->hfunc;
int err;
mutex_lock(&priv->state_lock);
- if (delete) {
+ if (*rss_context && rxfh->rss_delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
@@ -1295,7 +1297,8 @@ static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
goto unlock;
}
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key,
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
+ rxfh->indir, rxfh->key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
unlock:
@@ -1303,25 +1306,6 @@ unlock:
return err;
}
-int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0);
-}
-
-int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int err;
-
- mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
- mutex_unlock(&priv->state_lock);
- return err;
-}
-
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@@ -2398,6 +2382,7 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
@@ -2420,8 +2405,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
- .get_rxfh_context = mlx5e_get_rxfh_context,
- .set_rxfh_context = mlx5e_set_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
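Editor's note: the en_ethtool.c hunk above moves the driver to the consolidated rxfh interface, where one ethtool_rxfh_param carries indir/key/hfunc together with rss_context and rss_delete, and cap_rss_ctx_supported tells the core that additional RSS contexts go through the same pair of callbacks. The sketch below is a simplified, compilable illustration of that flow, assuming a stand-in rxfh_param struct that only mirrors the relevant fields of the kernel's struct ethtool_rxfh_param; the toy context table is hypothetical.

#include <stdint.h>
#include <string.h>

struct rxfh_param {		/* stand-in for struct ethtool_rxfh_param */
	uint8_t  hfunc;
	uint32_t *indir;
	uint8_t  *key;
	uint32_t rss_context;	/* 0 selects the default RSS context */
	uint8_t  rss_delete;	/* non-zero: destroy rss_context */
};

#define MAX_CTX  4
#define INDIR_SZ 8

struct toy_priv {
	uint32_t indir[MAX_CTX][INDIR_SZ];
	int	 in_use[MAX_CTX];
};

/* One callback serves the default and the extra contexts, as mlx5e_set_rxfh() now does */
static int toy_set_rxfh(struct toy_priv *priv, const struct rxfh_param *p)
{
	if (p->rss_context >= MAX_CTX)
		return -1;
	if (p->rss_context && p->rss_delete) {
		priv->in_use[p->rss_context] = 0;	/* tear down the context */
		return 0;
	}
	if (p->indir)
		memcpy(priv->indir[p->rss_context], p->indir,
		       sizeof(priv->indir[0]));
	priv->in_use[p->rss_context] = 1;
	return 0;
}

int main(void)
{
	uint32_t indir[INDIR_SZ] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	struct rxfh_param p = { .rss_context = 1, .indir = indir };
	struct toy_priv priv = { 0 };

	return toy_set_rxfh(&priv, &p);
}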
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0c87ddb8a7a2..c8e8f512803e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -902,6 +902,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
pp_params.nid = node;
pp_params.dev = rq->pdev;
pp_params.napi = rq->cq.napi;
+ pp_params.netdev = rq->netdev;
pp_params.dma_dir = rq->buff.map_dir;
pp_params.max_len = PAGE_SIZE;
@@ -1351,6 +1352,17 @@ void mlx5e_close_rq(struct mlx5e_rq *rq)
mlx5e_free_rq(rq);
}
+u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
+ struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
+ u8 lag_port, u8 tc)
+{
+ if (profile->get_tisn)
+ return profile->get_tisn(mdev, priv, lag_port, tc);
+
+ return mdev->mlx5e_res.hw_objs.tisn[lag_port][tc];
+}
+
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
kvfree(sq->db.xdpi_fifo.xi);
@@ -1919,7 +1931,8 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
return err;
csp.tis_lst_sz = 1;
- csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
+ csp.tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, 0); /* tc = 0 */
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
@@ -1981,11 +1994,12 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
mlx5e_free_xdpsq(sq);
}
-static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
+static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ struct workqueue_struct *workqueue,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int err;
u32 i;
@@ -2012,13 +2026,13 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
}
cq->mdev = mdev;
- cq->netdev = priv->netdev;
- cq->priv = priv;
+ cq->netdev = netdev;
+ cq->workqueue = workqueue;
return 0;
}
-static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
+static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param,
struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
@@ -2029,7 +2043,7 @@ static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
param->wq.db_numa_node = ccp->node;
param->eq_ix = ccp->ix;
- err = mlx5e_alloc_cq_common(priv, param, cq);
+ err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq);
cq->napi = ccp->napi;
cq->ch_stats = ccp->ch_stats;
@@ -2095,14 +2109,13 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
-int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int err;
- err = mlx5e_alloc_cq(priv, param, ccp, cq);
+ err = mlx5e_alloc_cq(mdev, param, ccp, cq);
if (err)
return err;
@@ -2135,7 +2148,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->txq_sq.cqp,
ccp, &c->sq[tc].cq);
if (err)
goto err_close_tx_cqs;
@@ -2203,12 +2216,15 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
u32 qos_queue_group_id;
+ u32 tisn;
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, tc);
err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
if (err)
goto err_close_sqs;
- err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
+ err = mlx5e_open_txqsq(c, tisn, txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
&c->priv->channel_stats[c->ix]->sq[tc]);
@@ -2336,12 +2352,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
mlx5e_build_create_cq_param(&ccp, c);
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
&c->async_icosq.cq);
if (err)
return err;
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->icosq.cq);
if (err)
goto err_close_async_icosq_cq;
@@ -2350,17 +2366,17 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
- err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->rq.cq);
if (err)
goto err_close_xdp_tx_cqs;
- err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
+ err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
&ccp, &c->rq_xdpsq.cq) : 0;
if (err)
goto err_close_rx_cq;
@@ -3307,7 +3323,7 @@ static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- return mlx5e_alloc_cq_common(priv, param, cq);
+ return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq);
}
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -3363,75 +3379,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
mlx5e_free_cq(&drop_rq->cq);
}
-int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
-{
- void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
- MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
-
- if (MLX5_GET(tisc, tisc, tls_en))
- MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
-
- if (mlx5_lag_is_lacp_owner(mdev))
- MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
-
- return mlx5_core_create_tis(mdev, in, tisn);
-}
-
-void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
-{
- mlx5_core_destroy_tis(mdev, tisn);
-}
-
-void mlx5e_destroy_tises(struct mlx5e_priv *priv)
-{
- int tc, i;
-
- for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
-}
-
-static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
-}
-
-int mlx5e_create_tises(struct mlx5e_priv *priv)
-{
- int tc, i;
- int err;
-
- for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
- for (tc = 0; tc < priv->profile->max_tc; tc++) {
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
-
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
- MLX5_SET(tisc, tisc, prio, tc << 1);
-
- if (mlx5e_lag_should_assign_affinity(priv->mdev))
- MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
-
- err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
- if (err)
- goto err_close_tises;
- }
- }
-
- return 0;
-
-err_close_tises:
- for (; i >= 0; i--) {
- for (tc--; tc >= 0; tc--)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
- tc = priv->profile->max_tc;
- }
-
- return err;
-}
-
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
if (priv->mqprio_rl) {
@@ -3440,7 +3387,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
priv->mqprio_rl = NULL;
}
mlx5e_accel_cleanup_tx(priv);
- mlx5e_destroy_tises(priv);
}
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
@@ -3542,7 +3488,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- if (tc && tc != MLX5E_MAX_NUM_TC)
+ if (tc && tc != MLX5_MAX_NUM_TC)
return -EINVAL;
new_params = priv->channels.params;
@@ -5185,6 +5131,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
+ netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
mlx5e_dcbnl_build_netdev(netdev);
@@ -5265,7 +5212,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
- netdev->features |= NETIF_F_GSO_UDP_L4;
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
@@ -5505,23 +5451,13 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
int err;
- err = mlx5e_create_tises(priv);
- if (err) {
- mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
- return err;
- }
-
err = mlx5e_accel_init_tx(priv);
if (err)
- goto err_destroy_tises;
+ return err;
mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
-
-err_destroy_tises:
- mlx5e_destroy_tises(priv);
- return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
@@ -5616,7 +5552,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
- .max_tc = MLX5E_MAX_NUM_TC,
+ .max_tc = MLX5_MAX_NUM_TC,
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
@@ -6056,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
if (netif_device_present(netdev))
return 0;
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, true);
if (err)
return err;
@@ -6069,7 +6005,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
return 0;
}
-static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
+static int _mlx5e_suspend(struct auxiliary_device *adev)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
@@ -6087,15 +6023,18 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
return 0;
}
-static int mlx5e_probe(struct auxiliary_device *adev,
- const struct auxiliary_device_id *id)
+static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
+{
+ return _mlx5e_suspend(adev);
+}
+
+static int _mlx5e_probe(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
const struct mlx5e_profile *profile = &mlx5e_nic_profile;
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5e_dev *mlx5e_dev;
struct net_device *netdev;
- pm_message_t state = {};
struct mlx5e_priv *priv;
int err;
@@ -6150,7 +6089,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
return 0;
err_resume:
- mlx5e_suspend(adev, state);
+ _mlx5e_suspend(adev);
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
@@ -6162,16 +6101,21 @@ err_devlink_unregister:
return err;
}
+static int mlx5e_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ return _mlx5e_probe(adev);
+}
+
static void mlx5e_remove(struct auxiliary_device *adev)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
- pm_message_t state = {};
mlx5_core_uplink_netdev_set(priv->mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
- mlx5e_suspend(adev, state);
+ _mlx5e_suspend(adev);
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index e92d4f83592e..05527418fa64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -112,8 +112,18 @@ static const struct counter_desc vport_rep_stats_desc[] = {
tx_vport_rdma_multicast_bytes) },
};
+static const struct counter_desc vport_rep_loopback_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ vport_loopback_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ vport_loopback_bytes) },
+};
+
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
+#define NUM_VPORT_REP_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
+ ARRAY_SIZE(vport_rep_loopback_stats_desc) : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
@@ -157,7 +167,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
- return NUM_VPORT_REP_HW_COUNTERS;
+ return NUM_VPORT_REP_HW_COUNTERS +
+ NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
@@ -166,6 +177,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_rep_loopback_stats_desc[i].format);
return idx;
}
@@ -176,6 +190,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_stats_desc, i);
+ for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_loopback_stats_desc, i);
return idx;
}
@@ -247,6 +264,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
rep_stats->tx_vport_rdma_multicast_bytes =
MLX5_GET_CTR(out, received_ib_multicast.octets);
+ if (MLX5_CAP_GEN(priv->mdev, vport_counter_local_loopback)) {
+ rep_stats->vport_loopback_packets =
+ MLX5_GET_CTR(out, local_loopback.packets);
+ rep_stats->vport_loopback_bytes =
+ MLX5_GET_CTR(out, local_loopback.octets);
+ }
+
out:
kvfree(out);
}
@@ -1156,12 +1180,6 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
int err;
- err = mlx5e_create_tises(priv);
- if (err) {
- mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
- return err;
- }
-
err = mlx5e_rep_neigh_init(rpriv);
if (err)
goto err_neigh_init;
@@ -1184,7 +1202,6 @@ err_ht_init:
err_init_tx:
mlx5e_rep_neigh_cleanup(rpriv);
err_neigh_init:
- mlx5e_destroy_tises(priv);
return err;
}
@@ -1198,7 +1215,6 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
mlx5e_cleanup_uplink_rep_tx(rpriv);
mlx5e_rep_neigh_cleanup(rpriv);
- mlx5e_destroy_tises(priv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1428,7 +1444,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
- .max_tc = MLX5E_MAX_NUM_TC,
+ .max_tc = MLX5_MAX_NUM_TC,
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
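Editor's note: the representor-stats hunk above sizes the new loopback counters conditionally — NUM_VPORT_REP_LOOPBACK_COUNTERS() evaluates to zero when the vport_counter_local_loopback capability is absent, so the count, string-fill and value-fill loops stay in sync with no extra branching. A minimal standalone sketch of that pattern follows, with an invented capability flag and descriptor table.

#include <stdio.h>

struct counter_desc { const char *name; };

static const struct counter_desc loopback_desc[] = {
	{ "vport_loopback_packets" },
	{ "vport_loopback_bytes" },
};

#define ARRAY_SIZE(a)	  (sizeof(a) / sizeof((a)[0]))
#define NUM_LOOPBACK(cap) ((cap) ? ARRAY_SIZE(loopback_desc) : 0)

int main(void)
{
	int cap_loopback = 1;	/* stands in for the capability query */
	size_t i;

	for (i = 0; i < NUM_LOOPBACK(cap_loopback); i++)
		printf("%s\n", loopback_desc[i].name);
	return 0;
}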
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8d9743a5e42c..d601b5faaed5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -298,8 +298,8 @@ static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
struct page *page = frag_page->page;
- if (page_pool_defrag_page(page, drain_count) == 0)
- page_pool_put_defragged_page(rq->page_pool, page, -1, true);
+ if (page_pool_unref_page(page, drain_count) == 0)
+ page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -1039,7 +1039,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- queue_work(cq->priv->wq, &sq->recover_work);
+ queue_work(cq->workqueue, &sq->recover_work);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 477c547dcc04..12b3607afecd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -476,6 +476,8 @@ struct mlx5e_rep_stats {
u64 tx_vport_rdma_multicast_packets;
u64 rx_vport_rdma_multicast_bytes;
u64 tx_vport_rdma_multicast_bytes;
+ u64 vport_loopback_packets;
+ u64 vport_loopback_bytes;
};
struct mlx5e_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 96af9e2ab1d8..9fb2c057bd78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
err = mlx5e_rss_params_indir_init(&indir, mdev,
mlx5e_rqt_size(mdev, hp->num_channels),
- mlx5e_rqt_size(mdev, priv->max_nch));
+ mlx5e_rqt_size(mdev, hp->num_channels));
if (err)
return err;
@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
continue;
+
+ list_del(&peer_flow->peer_flows);
if (refcount_dec_and_test(&peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
- list_del(&peer_flow->peer_flows);
kfree(peer_flow);
}
}
@@ -3209,10 +3210,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
- set_masks = &hdrs[0].masks;
- add_masks = &hdrs[1].masks;
- set_vals = &hdrs[0].vals;
- add_vals = &hdrs[1].vals;
+ set_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].masks;
+ add_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].masks;
+ set_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].vals;
+ add_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].vals;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -5030,22 +5031,6 @@ int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
return apply_police_params(priv, 0, extack);
}
-void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
- struct tc_cls_matchall_offload *ma)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct rtnl_link_stats64 cur_stats;
- u64 dbytes;
- u64 dpkts;
-
- mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
- dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
- dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
- rpriv->prev_vf_vport_stats = cur_stats;
- flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
- FLOW_ACTION_HW_STATS_DELAYED);
-}
-
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index adb39e30f90f..c24bda56b2b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -203,8 +203,6 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f);
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f);
-void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
- struct tc_cls_matchall_offload *ma);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f0b506e562df..2fa076b23fbe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
+ /* ensure skb is put on metadata_map before tracking the index */
+ wmb();
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
@@ -861,7 +863,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
- queue_work(cq->priv->wq, &sq->recover_work);
+ queue_work(cq->workqueue, &sq->recover_work);
}
stats->cqe_err++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index a7ed87e9d842..22dd30cf8033 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
i++;
}
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
ether_addr_copy(dmac_v, entry->key.addr);
@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
}
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
kvfree(rule_spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 190f10aba170..5a0047bdcb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
+ if (!rpriv || !rpriv->netdev)
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b4eb17141edf..349e28a6dd8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -618,13 +618,6 @@ static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}
-/* The returned number is valid only when the dev is eswitch manager. */
-static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
-{
- return mlx5_core_is_ecpf_esw_manager(dev) ?
- MLX5_VPORT_ECPF : MLX5_VPORT_PF;
-}
-
static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b0455134c98e..baaae628b0a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
}
static bool
-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{
- bool vf_dest = false, pf_dest = false;
+ bool internal_dest = false, external_dest = false;
int i;
for (i = 0; i < max_dest; i++) {
- if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
continue;
- if (dests[i].vport.num == MLX5_VPORT_UPLINK)
- pf_dest = true;
+ /* Uplink dest is external, but considered as internal
+ * if there is reformat because firmware uses LB+hairpin to support it.
+ */
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
+ !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
+ external_dest = true;
else
- vf_dest = true;
+ internal_dest = true;
- if (vf_dest && pf_dest)
+ if (internal_dest && external_dest)
return true;
}
@@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
/* Header rewrite with combined wire+loopback in FDB is not allowed */
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
- esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_dests_to_int_external(dest, i)) {
esw_warn(esw->dev,
- "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
rule = ERR_PTR(-EINVAL);
goto err_esw_get;
}
@@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct net *devl_net, *netdev_net;
- bool ret = false;
-
- mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
- if (dev->mlx5e_res.uplink_netdev) {
- netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
- devl_net = devlink_net(devlink);
- ret = net_eq(devl_net, netdev_net);
- }
- mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
- return ret;
-}
-
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
- !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
- return -EPERM;
- }
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a4b925331661..9b8599c200e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
+ MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
+ !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
@@ -1144,3 +1146,37 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
return mlx5_fs_cmd_get_stub_cmds();
}
}
+
+int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {};
+
+ if (silent_mode && !MLX5_CAP_GEN(dev, silent_mode))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
+ MLX5_SET(set_l2_table_entry_in, in, silent_mode_valid, 1);
+ MLX5_SET(set_l2_table_entry_in, in, silent_mode, silent_mode);
+
+ return mlx5_cmd_exec_in(dev, set_l2_table_entry, in);
+}
+
+int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect)
+{
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
+
+ if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(set_flow_table_root_in, in, opcode,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+ MLX5_SET(set_flow_table_root_in, in, table_type,
+ FS_FT_NIC_TX);
+ if (disconnect)
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
+ else
+ MLX5_SET(set_flow_table_root_in, in, table_id, ft_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 7790ae5531e1..53e0e5137d3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -122,4 +122,6 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
+int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
+int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 4aed1768b85f..78eb6b7097e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -181,7 +181,7 @@ struct mlx5_flow_rule {
struct mlx5_flow_handle {
int num_rules;
- struct mlx5_flow_rule *rule[];
+ struct mlx5_flow_rule *rule[] __counted_by(num_rules);
};
/* Type of children is mlx5_flow_group */
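Editor's note: the __counted_by(num_rules) annotation added to struct mlx5_flow_handle above tells the compiler (and the kernel's fortify/UBSAN bounds checks) which member holds the element count of the flexible array. A small standalone example of the same annotation follows; the fallback macro and the demo struct are illustrative — in-tree the attribute comes from the compiler-attribute headers.

#include <stdlib.h>

#ifndef __counted_by
#define __counted_by(member)	/* no-op when the compiler lacks the attribute */
#endif

struct flow_handle {
	int num_rules;
	void *rule[] __counted_by(num_rules);
};

static struct flow_handle *alloc_handle(int num_rules)
{
	struct flow_handle *fh;

	fh = calloc(1, sizeof(*fh) + num_rules * sizeof(fh->rule[0]));
	if (fh)
		fh->num_rules = num_rules;	/* count must match the allocation */
	return fh;
}

int main(void)
{
	struct flow_handle *fh = alloc_handle(4);

	free(fh);
	return !fh;
}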
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 17fe30a4c06c..0c26d707eed2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -539,7 +539,7 @@ struct mlx5_fc_bulk {
u32 base_id;
int bulk_len;
unsigned long *bitmask;
- struct mlx5_fc fcs[];
+ struct mlx5_fc fcs[] __counted_by(bulk_len);
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index c4e19d627da2..2911aa34a5be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -348,6 +348,25 @@ static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
}
#endif
+static const struct pci_device_id mgt_ifc_device_ids[] = {
+ { PCI_VDEVICE(MELLANOX, 0xc2d2) }, /* BlueField1 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d3) }, /* BlueField2 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d4) }, /* BlueField3-Lx MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d5) }, /* BlueField3 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d6) }, /* BlueField4 MGT interface device ID */
+};
+
+static bool mlx5_is_mgt_ifc_pci_device(struct mlx5_core_dev *dev, u16 dev_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mgt_ifc_device_ids); ++i)
+ if (mgt_ifc_device_ids[i].device == dev_id)
+ return true;
+
+ return false;
+}
+
static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -362,10 +381,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
if (err)
return pcibios_err_to_errno(err);
- if (sdev_id != dev_id) {
- mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id);
- return -EPERM;
- }
+
+ if (sdev_id == dev_id)
+ continue;
+
+ if (mlx5_is_mgt_ifc_pci_device(dev, sdev_id))
+ continue;
+
+ mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id);
+ return -EPERM;
}
return 0;
}
@@ -679,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
mlx5_eq_notifier_register(dev, &fw_reset->nb);
}
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
{
- mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (!fw_reset)
+ return;
+
+ mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
}
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
@@ -709,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
- struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ struct mlx5_fw_reset *fw_reset;
int err;
+ if (!MLX5_CAP_MCAM_REG(dev, mfrl))
+ return 0;
+
+ fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
if (!fw_reset)
return -ENOMEM;
fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
@@ -747,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
devl_params_unregister(priv_to_devlink(dev),
mlx5_fw_reset_devlink_params,
ARRAY_SIZE(mlx5_fw_reset_devlink_params));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8ff6dc9bc803..b5c709bba155 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct health_buffer __iomem *h = health->health;
u8 synd = ioread8(&h->synd);
+ devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
if (!synd)
return 0;
- devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 2bf77a5251b4..d77be1b4dd9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -339,7 +339,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &ipriv->tisn);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -356,7 +356,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
+ mlx5e_destroy_tis(priv->mdev, ipriv->tisn);
mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
@@ -483,6 +483,18 @@ static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
return ARRAY_SIZE(mlx5i_stats_grps);
}
+u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ if (WARN(lag_port || tc,
+ "IPoIB unexpected non-zero value: lag_port (%u), tc (%u)\n",
+ lag_port, tc))
+ return 0;
+
+ return ipriv->tisn;
+}
+
static const struct mlx5e_profile mlx5i_nic_profile = {
.init = mlx5i_init,
.cleanup = mlx5i_cleanup,
@@ -499,6 +511,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.max_tc = MLX5I_MAX_NUM_TC,
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
+ .get_tisn = mlx5i_get_tisn,
};
/* mlx5i netdev NDos */
@@ -770,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
}
/* This should only be called once per mdev */
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, false);
if (err)
goto destroy_ht;
}
@@ -829,7 +842,7 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
*params = (struct rdma_netdev_alloc_params){
.sizeof_priv = sizeof(struct mlx5i_priv) +
sizeof(struct mlx5e_priv),
- .txqs = nch * MLX5E_MAX_NUM_TC,
+ .txqs = nch * MLX5_MAX_NUM_TC,
.rxqs = nch,
.param = mdev,
.initialize_rdma_netdev = mlx5_rdma_setup_rn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index f3f2af972020..2ab6437a1c49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -53,6 +53,7 @@ extern const struct mlx5e_rx_handlers mlx5i_rx_handlers;
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
u32 qpn;
+ u32 tisn;
bool sub_interface;
u32 num_sub_interfaces;
u32 qkey;
@@ -63,6 +64,7 @@ struct mlx5i_priv {
};
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
+u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc);
/* Underlay QP create/destroy functions */
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 03e681297937..f87471306f6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -218,7 +218,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_unint_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]);
+ err = mlx5i_create_tis(mdev, ipriv->qpn, &ipriv->tisn);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -240,7 +240,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
err_close_channels:
mlx5e_close_channels(&epriv->channels);
err_clear_state_opened_flag:
- mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
+ mlx5e_destroy_tis(mdev, ipriv->tisn);
err_remove_rx_uderlay_qp:
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_unint_underlay_qp:
@@ -269,7 +269,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
- mlx5e_destroy_tis(mdev, priv->tisn[0][0]);
+ mlx5e_destroy_tis(mdev, ipriv->tisn);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
@@ -361,6 +361,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.update_stats = NULL,
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
+ .get_tisn = mlx5i_get_tisn,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 40c7be124041..58bd749b5e4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
- MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0c83ef174275..0361741632a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -266,9 +266,6 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
return -EINVAL;
@@ -286,12 +283,15 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_settime_real_time(mdev, ts);
- if (err)
- return err;
+
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_settime_real_time(mdev, ts);
+
+ if (err)
+ return err;
+ }
write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
@@ -341,9 +341,6 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
/* HW time adjustment range is checked. If out of range, settime instead */
if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
struct timespec64 ts;
@@ -367,13 +364,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_adjtime_real_time(mdev, delta);
- if (err)
- return err;
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_adjtime_real_time(mdev, delta);
+
+ if (err)
+ return err;
+ }
+
write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&timer->tc, delta);
mlx5_update_clock_info_page(mdev);
@@ -396,15 +396,14 @@ static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_p
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
- if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
+ if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) &&
+ scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) {
+ /* HW scaled_ppm support on mlx5 devices only supports a 32-bit value */
MLX5_SET(mtutc_reg, in, freq_adj_units,
MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
- MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm);
} else {
MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
@@ -420,13 +419,15 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_core_dev *mdev;
unsigned long flags;
u32 mult;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
- if (err)
- return err;
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
+
+ if (err)
+ return err;
+ }
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@@ -1004,14 +1005,38 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
info->frac = timer->tc.frac;
}
+static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+ u8 log_max_freq_adjustment = 0;
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTUTC, 0, 0);
+ if (!err)
+ log_max_freq_adjustment =
+ MLX5_GET(mtutc_reg, out, log_max_freq_adjustment);
+
+ if (log_max_freq_adjustment)
+ clock->ptp_info.max_adj =
+ min(S32_MAX, 1 << log_max_freq_adjustment);
+}
+
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
+ /* Configure the PHC */
+ clock->ptp_info = mlx5_ptp_clock_info;
+
+ if (MLX5_CAP_MCAM_REG(mdev, mtutc))
+ mlx5_init_timer_max_freq_adjustment(mdev);
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(clock);
- clock->ptp_info = mlx5_ptp_clock_info;
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
@@ -1042,11 +1067,10 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
}
seqlock_init(&clock->lock);
- mlx5_init_timer_clock(mdev);
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
- /* Configure the PHC */
- clock->ptp_info = mlx5_ptp_clock_info;
+ /* Initialize the device clock */
+ mlx5_init_timer_clock(mdev);
/* Initialize 1PPS data structures */
mlx5_init_pps(mdev);
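Editor's note: in the adjfine path above, the device's native scaled-ppm units are used only when the MTUTC capability advertises them and the requested value fits in 32 bits; otherwise the adjustment falls back to parts-per-billion. A simplified standalone sketch of that selection follows; the capability flag and the conversion helper (an approximation of the PTP core's scaled_ppm_to_ppb()) are illustrative, not mlx5 code.

#include <stdint.h>
#include <stdio.h>

/* scaled_ppm carries a 16-bit binary fraction: ppb = scaled_ppm * 1000 / 2^16,
 * i.e. scaled_ppm * 125 / 2^13.
 */
static long to_ppb(long scaled_ppm)
{
	return (scaled_ppm * 125L) >> 13;
}

static void program_freq_adj(int has_scaled_ppm_units, long scaled_ppm)
{
	if (has_scaled_ppm_units &&
	    scaled_ppm <= INT32_MAX && scaled_ppm >= INT32_MIN)
		printf("scaled_ppm units: %d\n", (int32_t)scaled_ppm);
	else
		printf("ppb units: %ld\n", to_ppb(scaled_ppm));
}

int main(void)
{
	program_freq_adj(1, 65536);	/* exactly 1 ppm */
	program_freq_adj(0, 65536);	/* no scaled-ppm support: 1000 ppb */
	return 0;
}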
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index e8e50563e956..e7d59cfa8708 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -256,6 +256,13 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
devcom_free_comp_dev(devcom);
}
+int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
+{
+ struct mlx5_devcom_comp *comp = devcom->comp;
+
+ return kref_read(&comp->ref);
+}
+
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index fc23bbef87b4..ec32b686f586 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -31,6 +31,7 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data);
+int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom);
void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready);
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 9482e51ac82a..7c5516b0a844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -13,11 +13,13 @@ struct mlx5_dm {
unsigned long *steering_sw_icm_alloc_blocks;
unsigned long *header_modify_sw_icm_alloc_blocks;
unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
+ unsigned long *header_encap_sw_icm_alloc_blocks;
};
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
u64 header_modify_pattern_icm_blocks = 0;
+ u64 header_sw_encap_icm_blocks = 0;
u64 header_modify_icm_blocks = 0;
u64 steering_icm_blocks = 0;
struct mlx5_dm *dm;
@@ -54,6 +56,17 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
goto err_modify_hdr;
}
+ if (MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size)) {
+ header_sw_encap_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_encap_sw_icm_alloc_blocks =
+ bitmap_zalloc(header_sw_encap_icm_blocks, GFP_KERNEL);
+ if (!dm->header_encap_sw_icm_alloc_blocks)
+ goto err_pattern;
+ }
+
support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
@@ -66,11 +79,14 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
dm->header_modify_pattern_sw_icm_alloc_blocks =
bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
- goto err_pattern;
+ goto err_sw_encap;
}
return dm;
+err_sw_encap:
+ bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+
err_pattern:
bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
@@ -105,6 +121,14 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
}
+ if (dm->header_encap_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_encap_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_indirect_encap_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+ }
+
if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
BIT(MLX5_CAP_DEV_MEM(dev,
@@ -164,6 +188,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
log_header_modify_pattern_sw_icm_size);
block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_SW_ENCAP:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ indirect_encap_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_indirect_encap_sw_icm_size);
+ block_map = dm->header_encap_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
@@ -242,6 +273,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
header_modify_pattern_sw_icm_start_address);
block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_SW_ENCAP:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ indirect_encap_sw_icm_start_address);
+ block_map = dm->header_encap_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a17152c1cbb2..bccf6e53556c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -219,7 +219,6 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
driver_version);
u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
- int remaining_size = driver_ver_sz;
char *string;
if (!MLX5_CAP_GEN(dev, driver_version))
@@ -227,22 +226,9 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
- strncpy(string, "Linux", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, ",", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, KBUILD_MODNAME, remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, ",", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
-
- snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
- LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
- LINUX_VERSION_SUBLEVEL);
+ snprintf(string, driver_ver_sz, "Linux,%s,%u.%u.%u",
+ KBUILD_MODNAME, LINUX_VERSION_MAJOR,
+ LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL);
/*Send the command*/
MLX5_SET(set_driver_version_in, in, opcode,
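Editor's note: the main.c hunk above replaces a strncpy()/strncat() chain with a single bounded snprintf() when composing the driver-version string. A tiny standalone equivalent is shown below; the buffer size and version numbers are made up for the demo.

#include <stdio.h>

int main(void)
{
	char ver[64];	/* stands in for the driver_version field in the command */

	snprintf(ver, sizeof(ver), "Linux,%s,%u.%u.%u",
		 "mlx5_core", 6u, 8u, 0u);
	puts(ver);
	return 0;
}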
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6b14e347d914..a79b7959361b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -243,6 +243,7 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
u8 access_reg_group);
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
+int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir);
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7d8c732818f2..7fba1c46e2ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1206,3 +1206,13 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
*speed = max_speed;
return 0;
}
+
+int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir)
+{
+ u32 in[MLX5_ST_SZ_DW(mpir_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(mpir_reg);
+
+ MLX5_SET(mpir_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(dev, in, sz, mpir, sz, MLX5_REG_MPIR, 0, 0);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index e3ec559369fa..2ebb61ef3ea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
switch (action_type) {
case DR_ACTION_TYP_DROP:
attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
break;
case DR_ACTION_TYP_FT:
dest_action = action;
@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->sampler->tx_icm_addr;
break;
case DR_ACTION_TYP_VPORT:
- attr.hit_gvmi = action->vport->caps->vhca_gvmi;
- dest_action = action;
- attr.final_icm_addr = rx_rule ?
- action->vport->caps->icm_address_rx :
- action->vport->caps->icm_address_tx;
+ if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
+ /* can't go to uplink on RX rule - dropping instead */
+ attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+ } else {
+ attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+ dest_action = action;
+ attr.final_icm_addr = rx_rule ?
+ action->vport->caps->icm_address_rx :
+ action->vport->caps->icm_address_tx;
+ }
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &
@@ -1170,7 +1177,6 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
bool ignore_flow_level,
u32 flow_source)
{
- struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest;
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
@@ -1249,11 +1255,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
* one that done in the TX.
* So, if one of the ft target is wire, put it at the end of the dest list.
*/
- if (is_ft_wire && num_dst_ft > 1) {
- tmp_hw_dest = hw_dests[last_dest];
- hw_dests[last_dest] = hw_dests[num_of_dests - 1];
- hw_dests[num_of_dests - 1] = tmp_hw_dest;
- }
+ if (is_ft_wire && num_dst_ft > 1)
+ swap(hw_dests[last_dest], hw_dests[num_of_dests - 1]);
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 21753f327868..1005bb6935b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -440,6 +440,27 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 *out;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
+
+ *sd_group = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.sd_group);
+out:
+ kvfree(out);
+ return err;
+}
+
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 954ba0826c61..3d09fa54598f 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -130,9 +130,15 @@ static int mlxbf_gige_open(struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
+ u64 control;
u64 int_en;
int err;
+ /* Perform general init of GigE block */
+ control = readq(priv->base + MLXBF_GIGE_CONTROL);
+ control |= MLXBF_GIGE_CONTROL_PORT_EN;
+ writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+
err = mlxbf_gige_request_irqs(priv);
if (err)
return err;
@@ -147,14 +153,14 @@ static int mlxbf_gige_open(struct net_device *netdev)
*/
priv->valid_polarity = 0;
- err = mlxbf_gige_rx_init(priv);
+ phy_start(phydev);
+
+ err = mlxbf_gige_tx_init(priv);
if (err)
goto free_irqs;
- err = mlxbf_gige_tx_init(priv);
+ err = mlxbf_gige_rx_init(priv);
if (err)
- goto rx_deinit;
-
- phy_start(phydev);
+ goto tx_deinit;
netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
@@ -176,8 +182,8 @@ static int mlxbf_gige_open(struct net_device *netdev)
return 0;
-rx_deinit:
- mlxbf_gige_rx_deinit(priv);
+tx_deinit:
+ mlxbf_gige_tx_deinit(priv);
free_irqs:
mlxbf_gige_free_irqs(priv);
@@ -365,7 +371,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
void __iomem *plu_base;
void __iomem *base;
int addr, phy_irq;
- u64 control;
int err;
base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
@@ -380,11 +385,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
if (IS_ERR(plu_base))
return PTR_ERR(plu_base);
- /* Perform general init of GigE block */
- control = readq(base + MLXBF_GIGE_CONTROL);
- control |= MLXBF_GIGE_CONTROL_PORT_EN;
- writeq(control, base + MLXBF_GIGE_CONTROL);
-
netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
if (!netdev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 227d01cace3f..699984358493 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -142,6 +142,9 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);
+ writeq(ilog2(priv->rx_q_entries),
+ priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
+
/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
* indicate readiness to receive interrupts
*/
@@ -154,9 +157,6 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
data |= MLXBF_GIGE_RX_DMA_EN;
writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
- writeq(ilog2(priv->rx_q_entries),
- priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
-
return 0;
free_wqe_and_skb:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index e827c78be114..e3271c845ee6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -282,6 +282,12 @@ MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
*/
MLXSW_ITEM32(cmd_mbox, query_fw, lag_mode_support, 0x18, 1, 1);
+/* cmd_mbox_query_fw_cff_support
+ * 0: CONFIG_PROFILE.flood_mode = 5 (CFF) is not supported by FW
+ * 1: CONFIG_PROFILE.flood_mode = 5 (CFF) is supported by FW
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cff_support, 0x18, 2, 1);
+
/* cmd_mbox_query_fw_clr_int_base_offset
* Clear Interrupt register's offset from clr_int_bar register
* in PCI address space.
@@ -779,6 +785,11 @@ enum mlxsw_cmd_mbox_config_profile_flood_mode {
* used.
*/
MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
+ /* CFF - Compressed FID Flood (CFF) mode.
+ * Reserved when legacy bridge model is used.
+ * Supported only by Spectrum-2+.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF = 5,
};
/* cmd_mbox_config_profile_flood_mode
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f23421f038f3..e4d7739bd7c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -211,6 +211,13 @@ mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_lag_mode);
+enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_core_flood_mode(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->flood_mode(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_flood_mode);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->driver_priv;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 764d14bd5bc0..6d11225594dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -38,6 +38,8 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
enum mlxsw_cmd_mbox_config_profile_lag_mode
mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core);
+enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_core_flood_mode(struct mlxsw_core *mlxsw_core);
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
@@ -322,7 +324,12 @@ struct mlxsw_config_profile {
u16 max_regions;
u8 max_flood_tables;
u8 max_vid_flood_tables;
+
+	/* Flood mode to use if used_flood_mode is set. If flood_mode_prefer_cff
+	 * is set, this is the fallback flood mode used when CFF is unsupported.
+ */
u8 flood_mode;
+
u8 max_fid_offset_flood_tables;
u16 fid_offset_flood_table_size;
u8 max_fid_flood_tables;
@@ -338,6 +345,7 @@ struct mlxsw_config_profile {
u8 kvd_hash_double_parts;
u8 cqe_time_stamp_type;
bool lag_mode_prefer_sw;
+ bool flood_mode_prefer_cff;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -489,6 +497,7 @@ struct mlxsw_bus {
u32 (*read_utc_sec)(void *bus_priv);
u32 (*read_utc_nsec)(void *bus_priv);
enum mlxsw_cmd_mbox_config_profile_lag_mode (*lag_mode)(void *bus_priv);
+ enum mlxsw_cmd_mbox_config_profile_flood_mode (*flood_mode)(void *priv);
u8 features;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index e4b25e187467..af99bf17eb36 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -106,7 +106,9 @@ struct mlxsw_pci {
u64 utc_sec_offset;
u64 utc_nsec_offset;
bool lag_mode_support;
+ bool cff_support;
enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
+ enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -130,6 +132,7 @@ struct mlxsw_pci {
const struct pci_device_id *id;
enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
u8 num_sdq_cqs; /* Number of CQs used for SDQs */
+ bool skip_reset;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1245,11 +1248,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
mbox, profile->fid_flood_table_size);
}
- if (profile->used_flood_mode) {
+ if (profile->flood_mode_prefer_cff && mlxsw_pci->cff_support) {
+ enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode =
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF;
+
+ mlxsw_cmd_mbox_config_profile_set_flood_mode_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox, flood_mode);
+ mlxsw_pci->flood_mode = flood_mode;
+ } else if (profile->used_flood_mode) {
mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
mbox, 1);
mlxsw_cmd_mbox_config_profile_flood_mode_set(
mbox, profile->flood_mode);
+ mlxsw_pci->flood_mode = profile->flood_mode;
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
}
if (profile->used_max_ib_mc) {
mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
@@ -1476,11 +1490,47 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
return -EBUSY;
}
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
- const struct pci_device_id *id)
+static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ int err;
+
+ mlxsw_reg_mrsr_pack(mrsr_pl,
+ MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
+ err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+ if (err)
+ return err;
+
+ device_lock_assert(&pdev->dev);
+
+ pci_cfg_access_lock(pdev);
+ pci_save_state(pdev);
+
+ err = __pci_reset_function_locked(pdev);
+ if (err)
+ pci_err(pdev, "PCI function reset failed with %d\n", err);
+
+ pci_restore_state(pdev);
+ pci_cfg_access_unlock(pdev);
+
+ return err;
+}
+
+static int mlxsw_pci_reset_sw(struct mlxsw_pci *mlxsw_pci)
+{
+ char mrsr_pl[MLXSW_REG_MRSR_LEN];
+
+ mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET);
+ return mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+}
+
+static int
+mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char mcam_pl[MLXSW_REG_MCAM_LEN];
+ bool pci_reset_supported;
u32 sys_status;
int err;
@@ -1491,8 +1541,26 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return err;
}
- mlxsw_reg_mrsr_pack(mrsr_pl);
- err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+ /* PCI core already issued a PCI reset, do not issue another reset. */
+ if (mlxsw_pci->skip_reset)
+ return 0;
+
+ mlxsw_reg_mcam_pack(mcam_pl,
+ MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
+ err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+ &pci_reset_supported);
+
+ if (pci_reset_supported) {
+ pci_dbg(pdev, "Starting PCI reset flow\n");
+ err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
+ } else {
+ pci_dbg(pdev, "Starting software reset flow\n");
+ err = mlxsw_pci_reset_sw(mlxsw_pci);
+ }
if (err)
return err;
@@ -1537,9 +1605,9 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (!mbox)
return -ENOMEM;
- err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ err = mlxsw_pci_reset(mlxsw_pci, mlxsw_pci->id);
if (err)
- goto err_sw_reset;
+ goto err_reset;
err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
if (err < 0) {
@@ -1601,6 +1669,9 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->lag_mode_support =
mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
+ mlxsw_pci->cff_support =
+ mlxsw_cmd_mbox_query_fw_cff_support_get(mbox);
+
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1672,7 +1743,7 @@ err_iface_rev:
err_query_fw:
mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
-err_sw_reset:
+err_reset:
mbox_put:
mlxsw_cmd_mbox_free(mbox);
return err;
@@ -1917,6 +1988,14 @@ mlxsw_pci_lag_mode(void *bus_priv)
return mlxsw_pci->lag_mode;
}
+static enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_pci_flood_mode(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci->flood_mode;
+}
+
static const struct mlxsw_bus mlxsw_pci_bus = {
.kind = "pci",
.init = mlxsw_pci_init,
@@ -1929,6 +2008,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.read_utc_sec = mlxsw_pci_read_utc_sec,
.read_utc_nsec = mlxsw_pci_read_utc_nsec,
.lag_mode = mlxsw_pci_lag_mode,
+ .flood_mode = mlxsw_pci_flood_mode,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
@@ -2059,11 +2139,34 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
kfree(mlxsw_pci);
}
+static void mlxsw_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+}
+
+static void mlxsw_pci_reset_done(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_pci->skip_reset = true;
+ mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus,
+ mlxsw_pci, false, NULL, NULL);
+ mlxsw_pci->skip_reset = false;
+}
+
+static const struct pci_error_handlers mlxsw_pci_err_handler = {
+ .reset_prepare = mlxsw_pci_reset_prepare,
+ .reset_done = mlxsw_pci_reset_done,
+};
+
int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
pci_driver->probe = mlxsw_pci_probe;
pci_driver->remove = mlxsw_pci_remove;
pci_driver->shutdown = mlxsw_pci_remove;
+ pci_driver->err_handler = &mlxsw_pci_err_handler;
return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);
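
The interplay between the new err_handler callbacks and the skip_reset flag is easy to miss when reading the hunks separately: reset_prepare() unregisters the bus device before the PCI core resets the function, and reset_done() re-registers it with skip_reset set, so mlxsw_pci_reset() returns early instead of issuing a second firmware reset. A minimal standalone sketch of that handshake follows; the toy_* names and types are illustrative only and not driver API.

#include <stdbool.h>
#include <stdio.h>

struct toy_pci {
	bool skip_reset;
};

/* Stand-in for mlxsw_pci_reset(): skip the firmware reset when the PCI
 * core has just reset the function via the reset_prepare/reset_done pair.
 */
static int toy_reset(struct toy_pci *p)
{
	if (p->skip_reset)
		return 0;		/* PCI core already reset the device */
	printf("issuing MRSR/PCI reset\n");
	return 0;
}

/* Stand-in for mlxsw_pci_reset_done(): re-init runs with skip_reset set. */
static void toy_reset_done(struct toy_pci *p)
{
	p->skip_reset = true;
	toy_reset(p);			/* re-registration path, reset skipped */
	p->skip_reset = false;
}

int main(void)
{
	struct toy_pci p = { .skip_reset = false };

	toy_reset(&p);			/* normal probe: reset issued */
	toy_reset_done(&p);		/* after a PCI-core reset: skipped */
	return 0;
}
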
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 25b294fdeb3d..8892654c685f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1024,6 +1024,8 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
* ------------------------------------------
* The following register controls the association of flooding tables and MIDs
* to packet types used for flooding.
+ *
+ * Reserved when CONFIG_PROFILE.flood_mode = CFF.
*/
#define MLXSW_REG_SFGC_ID 0x2011
#define MLXSW_REG_SFGC_LEN 0x14
@@ -1862,6 +1864,7 @@ MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
* Access: RW
*
* Note: Reserved when legacy bridge model is used.
+ * Reserved when CONFIG_PROFILE.flood_mode = CFF.
*/
MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
@@ -1872,6 +1875,7 @@ MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
* Access: RW
*
* Note: Reserved when legacy bridge model is used and when flood_rsp=1.
+ * Reserved when CONFIG_PROFILE.flood_mode = CFF
*/
MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
@@ -1880,6 +1884,8 @@ MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
* Used to point into the flooding table selected by SFGC register if
* the table is of type FID-Offset. Otherwise, this field is reserved.
* Access: RW
+ *
+ * Note: Reserved when CONFIG_PROFILE.flood_mode = CFF.
*/
MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
@@ -1938,6 +1944,35 @@ MLXSW_ITEM32(reg, sfmr, irif_v, 0x14, 24, 1);
*/
MLXSW_ITEM32(reg, sfmr, irif, 0x14, 0, 16);
+/* reg_sfmr_cff_mid_base
+ * Pointer to PGT table.
+ * Range: 0..(cap_max_pgt-1)
+ * Access: RW
+ *
+ * Note: Reserved on SwitchX/-2 and Spectrum-1.
+ * Supported when CONFIG_PROFILE.flood_mode = CFF.
+ */
+MLXSW_ITEM32(reg, sfmr, cff_mid_base, 0x20, 0, 16);
+
+/* reg_sfmr_nve_flood_prf_id
+ * FID flooding profile_id for NVE Encap
+ * Range 0..(max_cap_nve_flood_prf-1)
+ * Access: RW
+ *
+ * Note: Reserved on SwitchX/-2 and Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, nve_flood_prf_id, 0x24, 8, 2);
+
+/* reg_sfmr_cff_prf_id
+ * Compressed Fid Flooding profile_id
+ * Range 0..(max_cap_nve_flood_prf-1)
+ * Access: RW
+ *
+ * Note: Reserved on SwitchX/-2 and Spectrum-1.
+ * Supported only when CONFIG_PROFILE.flood_mode = CFF.
+ */
+MLXSW_ITEM32(reg, sfmr, cff_prf_id, 0x24, 0, 2);
+
/* reg_sfmr_smpe_valid
* SMPE is valid.
* Access: RW
@@ -1959,18 +1994,11 @@ MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
- u16 fid_offset, bool flood_rsp,
- enum mlxsw_reg_bridge_type bridge_type,
bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
mlxsw_reg_sfmr_fid_set(payload, fid);
- mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
- mlxsw_reg_sfmr_vtfp_set(payload, false);
- mlxsw_reg_sfmr_vv_set(payload, false);
- mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
- mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
@@ -2168,6 +2196,50 @@ static inline void mlxsw_reg_spvc_pack(char *payload, u16 local_port, bool et1,
mlxsw_reg_spvc_et0_set(payload, et0);
}
+/* SFFP - Switch FID Flooding Profiles Register
+ * --------------------------------------------
+ * The SFFP register populates the fid flooding profile tables used for the NVE
+ * flooding and Compressed-FID Flooding (CFF).
+ *
+ * Reserved on Spectrum-1.
+ */
+#define MLXSW_REG_SFFP_ID 0x2029
+#define MLXSW_REG_SFFP_LEN 0x0C
+
+MLXSW_REG_DEFINE(sffp, MLXSW_REG_SFFP_ID, MLXSW_REG_SFFP_LEN);
+
+/* reg_sffp_profile_id
+ * Profile ID a.k.a. SFMR.nve_flood_prf_id or SFMR.cff_prf_id
+ * Range 0..max_cap_nve_flood_prf-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sffp, profile_id, 0x00, 16, 2);
+
+/* reg_sffp_type
+ * The traffic type to reach the flooding table.
+ * Same as SFGC.type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sffp, type, 0x00, 0, 4);
+
+/* reg_sffp_flood_offset
+ * Flood offset. Offset to add to SFMR.cff_mid_base to get the final PGT address
+ * for FID flood; or offset to add to SFMR.nve_tunnel_flood_ptr to get KVD
+ * pointer for NVE underlay.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sffp, flood_offset, 0x04, 0, 3);
+
+static inline void mlxsw_reg_sffp_pack(char *payload, u8 profile_id,
+ enum mlxsw_reg_sfgc_type type,
+ u8 flood_offset)
+{
+ MLXSW_REG_ZERO(sffp, payload);
+ mlxsw_reg_sffp_profile_id_set(payload, profile_id);
+ mlxsw_reg_sffp_type_set(payload, type);
+ mlxsw_reg_sffp_flood_offset_set(payload, flood_offset);
+}
+
/* SPEVET - Switch Port Egress VLAN EtherType
* ------------------------------------------
* The switch port egress VLAN EtherType configures which EtherType to push at
@@ -10122,6 +10194,15 @@ mlxsw_reg_mgir_unpack(char *payload, u32 *hw_rev, char *fw_info_psid,
MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN);
+enum mlxsw_reg_mrsr_command {
+ /* Switch soft reset, does not reset PCI firmware. */
+ MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET = 1,
+	/* Reset will be done when the PCI link is disabled.
+	 * This command also resets the PCI firmware.
+ */
+ MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE = 6,
+};
+
/* reg_mrsr_command
* Reset/shutdown command
* 0 - do nothing
@@ -10130,10 +10211,11 @@ MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN);
*/
MLXSW_ITEM32(reg, mrsr, command, 0x00, 0, 4);
-static inline void mlxsw_reg_mrsr_pack(char *payload)
+static inline void mlxsw_reg_mrsr_pack(char *payload,
+ enum mlxsw_reg_mrsr_command command)
{
MLXSW_REG_ZERO(mrsr, payload);
- mlxsw_reg_mrsr_command_set(payload, 1);
+ mlxsw_reg_mrsr_command_set(payload, command);
}
/* MLCR - Management LED Control Register
@@ -10584,6 +10666,8 @@ MLXSW_ITEM32(reg, mcam, feature_group, 0x00, 16, 8);
enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
/* If set, MCIA supports 128 bytes payloads. Otherwise, 48 bytes. */
MLXSW_REG_MCAM_MCIA_128B = 34,
+ /* If set, MRSR.command=6 is supported. */
+ MLXSW_REG_MCAM_PCI_RESET = 48,
};
#define MLXSW_REG_BYTES_PER_DWORD 0x4
@@ -12934,6 +13018,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spfsr),
MLXSW_REG(spvc),
+ MLXSW_REG(sffp),
MLXSW_REG(spevet),
MLXSW_REG(smpe),
MLXSW_REG(smid2),
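
Taken together, the SFMR.cff_mid_base and SFFP.flood_offset fields documented above mean that in CFF mode the hardware resolves the flood PGT address as cff_mid_base + flood_offset, while the driver lays FIDs out contiguously in the PGT. A minimal sketch of that arithmetic, assuming a three-table bridge profile and made-up base values; the helper mirrors mlxsw_sp_fid_off_pgt_base_cff() in spectrum_fid.c further down.

#include <stdio.h>

#define NR_FLOOD_TABLES 3	/* assumed: UC, MC and BC tables of the bridge profile */

/* Per-FID PGT base programmed into SFMR.cff_mid_base. */
static unsigned int toy_cff_mid_base(unsigned int pgt_base, unsigned int fid_offset)
{
	return pgt_base + fid_offset * NR_FLOOD_TABLES;
}

int main(void)
{
	unsigned int pgt_base = 1000;	/* assumed PGT base of the FID family */
	unsigned int fid_offset = 5;	/* assumed offset of the FID in the family */
	unsigned int flood_offset = 2;	/* per traffic type, programmed via SFFP */

	/* Final PGT address resolved by the hardware for this FID and type. */
	printf("%u\n", toy_cff_mid_base(pgt_base, fid_offset) + flood_offset);
	return 0;
}
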
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 89dd2777ec4d..9d7977ebe186 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -27,6 +27,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_FID,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
+ MLXSW_RES_ID_MAX_NVE_FLOOD_PRF,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -88,6 +89,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_FID] = 0x2512,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
+ [MLXSW_RES_ID_MAX_NVE_FLOOD_PRF] = 0x2522,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index cec72d99d9c9..5d3413636a62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3190,10 +3190,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_lag_init;
}
- err = mlxsw_sp_fids_init(mlxsw_sp);
+ err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
- goto err_fids_init;
+ goto err_fid_core_init;
}
err = mlxsw_sp_policers_init(mlxsw_sp);
@@ -3379,8 +3379,8 @@ err_devlink_traps_init:
err_traps_init:
mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
- mlxsw_sp_fids_fini(mlxsw_sp);
-err_fids_init:
+ mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
+err_fid_core_init:
mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_pgt_fini(mlxsw_sp);
@@ -3416,7 +3416,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
- mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
+ mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
mlxsw_sp->pgt_smpe_index_valid = true;
@@ -3450,7 +3450,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
- mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
mlxsw_sp->pgt_smpe_index_valid = false;
@@ -3484,7 +3484,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
- mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
mlxsw_sp->pgt_smpe_index_valid = false;
@@ -3518,7 +3518,7 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
- mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
mlxsw_sp->pgt_smpe_index_valid = false;
@@ -3552,7 +3552,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_devlink_traps_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
- mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
@@ -3598,6 +3598,7 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.used_cqe_time_stamp_type = 1,
.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
.lag_mode_prefer_sw = true,
+ .flood_mode_prefer_cff = true,
};
/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
@@ -3626,6 +3627,7 @@ static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
.used_cqe_time_stamp_type = 1,
.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
.lag_mode_prefer_sw = true,
+ .flood_mode_prefer_cff = true,
};
static void
@@ -4515,6 +4517,10 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
+ err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
+ if (err)
+ goto err_fid_port_join_lag;
+
/* Port is no longer usable as a router interface */
if (mlxsw_sp_port->default_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
@@ -4534,6 +4540,8 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
err_replay:
mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
err_router_join:
+ mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
+err_fid_port_join_lag:
lag->ref_count--;
mlxsw_sp_port->lagged = 0;
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
@@ -4569,6 +4577,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
*/
mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
+ mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
+
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index c70333b460ea..a0c9775fa955 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -205,7 +205,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_mall_ops *mall_ops;
const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
- const struct mlxsw_sp_fid_family **fid_family_arr;
+ const struct mlxsw_sp_fid_core_ops *fid_core_ops;
size_t listeners_count;
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
@@ -252,6 +252,11 @@ struct mlxsw_sp_ptp_ops {
const struct mlxsw_tx_info *tx_info);
};
+struct mlxsw_sp_fid_core_ops {
+ int (*init)(struct mlxsw_sp *mlxsw_sp);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp);
+};
+
static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
@@ -508,6 +513,10 @@ enum mlxsw_sp_flood_type {
MLXSW_SP_FLOOD_TYPE_UC,
MLXSW_SP_FLOOD_TYPE_BC,
MLXSW_SP_FLOOD_TYPE_MC,
+ /* For RSP FIDs in CFF mode. */
+ MLXSW_SP_FLOOD_TYPE_NOT_UC,
+ /* For NVE traffic. */
+ MLXSW_SP_FLOOD_TYPE_ANY,
};
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
@@ -753,6 +762,8 @@ union mlxsw_sp_l3addr {
};
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_subport_port(const struct mlxsw_sp_rif *rif,
+ u16 *port, bool *is_lag);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
@@ -1319,11 +1330,11 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid);
int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
-void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_fid_port_join_lag(const struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_fid_port_leave_lag(const struct mlxsw_sp_port *mlxsw_sp_port);
-extern const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[];
-extern const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[];
+extern const struct mlxsw_sp_fid_core_ops mlxsw_sp1_fid_core_ops;
+extern const struct mlxsw_sp_fid_core_ops mlxsw_sp2_fid_core_ops;
/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 4c98950380d5..d231f4d2888b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
unsigned long *p_index)
{
unsigned int num_rows, entry_size;
+ unsigned long index;
/* We only allow allocations of entire rows */
if (num_erps % erp_core->num_erp_banks != 0)
@@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
entry_size = erp_core->erpt_entries_size[region_type];
num_rows = num_erps / erp_core->num_erp_banks;
- *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
- if (*p_index == 0)
+ index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+ if (!index)
return -ENOBUFS;
- *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
return 0;
}
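
The hunk above relies on a genalloc convention worth spelling out: gen_pool_alloc() returns 0 on failure, so the ERP table pool is created at a non-zero offset (MLXSW_SP_ACL_ERP_GENALLOC_OFFSET) and that offset is subtracted from every successful allocation; the change keeps the result in a local so *p_index is written only on success. A small standalone illustration of the convention, using a toy allocator rather than the genalloc API.

#include <stdio.h>

#define TOY_GENALLOC_OFFSET 0x100	/* assumed, like MLXSW_SP_ACL_ERP_GENALLOC_OFFSET */

/* Stand-in allocator: 0 means failure, anything else is an offset-based address. */
static unsigned long toy_pool_alloc(int fail)
{
	return fail ? 0 : TOY_GENALLOC_OFFSET + 42;
}

static int toy_table_alloc(unsigned long *p_index, int fail)
{
	unsigned long index = toy_pool_alloc(fail);

	if (!index)
		return -1;		/* *p_index left untouched on failure */

	*p_index = index - TOY_GENALLOC_OFFSET;
	return 0;
}

int main(void)
{
	unsigned long index = 7;

	if (!toy_table_alloc(&index, 0))
		printf("row %lu\n", index);	/* 42 */
	if (toy_table_alloc(&index, 1))
		printf("still %lu\n", index);	/* 42, untouched */
	return 0;
}
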
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index d50786b0a6ce..50ea1eff02b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -681,13 +681,13 @@ static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
+ struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
- mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
- region->id);
+ mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
kfree(region);
}
@@ -1564,6 +1564,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
+ tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
+ MLXSW_REG_PAGT_ACL_MAX_NUM);
err = ops->init(mlxsw_sp, tcam->priv, tcam);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index e954b8cd2ee8..65562ab208b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -11,6 +11,7 @@
#include <linux/refcount.h>
#include "spectrum.h"
+#include "spectrum_router.h"
#include "reg.h"
struct mlxsw_sp_fid_family;
@@ -71,12 +72,12 @@ static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
- enum mlxsw_flood_table_type table_type;
+ enum mlxsw_flood_table_type table_type; /* For flood_mode!=CFF. */
int table_index;
};
struct mlxsw_sp_fid_ops {
- void (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
+ int (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
int (*configure)(struct mlxsw_sp_fid *fid);
void (*deconfigure)(struct mlxsw_sp_fid *fid);
int (*index_alloc)(struct mlxsw_sp_fid *fid, const void *arg,
@@ -95,6 +96,34 @@ struct mlxsw_sp_fid_ops {
const struct net_device *nve_dev);
int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif);
+ int (*flood_table_init)(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table);
+ int (*pgt_size)(const struct mlxsw_sp_fid_family *fid_family,
+ u16 *p_pgt_size);
+ u16 (*fid_mid)(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_flood_table *flood_table);
+ void (*fid_pack)(char *sfmr_pl, const struct mlxsw_sp_fid *fid,
+ enum mlxsw_reg_sfmr_op op);
+
+	/* These are specific to RFID families and we assume they are only
+ * implemented by RFID families, if at all.
+ */
+ int (*fid_port_init)(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port);
+ void (*fid_port_fini)(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port);
+};
+
+enum mlxsw_sp_fid_flood_profile_id {
+ MLXSW_SP_FID_FLOOD_PROFILE_ID_BRIDGE = 1,
+ MLXSW_SP_FID_FLOOD_PROFILE_ID_RSP,
+ MLXSW_SP_FID_FLOOD_PROFILE_ID_NVE,
+};
+
+struct mlxsw_sp_fid_flood_profile {
+ const struct mlxsw_sp_flood_table *flood_tables;
+ int nr_flood_tables;
+ const enum mlxsw_sp_fid_flood_profile_id profile_id; /* For CFF mode. */
};
struct mlxsw_sp_fid_family {
@@ -104,12 +133,11 @@ struct mlxsw_sp_fid_family {
u16 end_index;
struct list_head fids_list;
unsigned long *fids_bitmap;
- const struct mlxsw_sp_flood_table *flood_tables;
- int nr_flood_tables;
+ const struct mlxsw_sp_fid_flood_profile *flood_profile;
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
- bool flood_rsp;
+ bool flood_rsp; /* For flood_mode!=CFF. */
enum mlxsw_reg_bridge_type bridge_type;
u16 pgt_base;
bool smpe_index_valid;
@@ -131,10 +159,31 @@ static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
};
+static const int mlxsw_sp_sfgc_not_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
+};
+
+static const int mlxsw_sp_sfgc_any_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
+};
+
static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_UC] = mlxsw_sp_sfgc_uc_packet_types,
[MLXSW_SP_FLOOD_TYPE_BC] = mlxsw_sp_sfgc_bc_packet_types,
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_NOT_UC] = mlxsw_sp_sfgc_not_uc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_ANY] = mlxsw_sp_sfgc_any_packet_types,
};
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
@@ -305,10 +354,13 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
int i;
- for (i = 0; i < fid_family->nr_flood_tables; i++) {
- if (fid_family->flood_tables[i].packet_type != packet_type)
+ for (i = 0; i < fid_family->flood_profile->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &fid_family->flood_profile->flood_tables[i];
+ if (flood_table->packet_type != packet_type)
continue;
- return &fid_family->flood_tables[i];
+ return flood_table;
}
return NULL;
@@ -320,24 +372,62 @@ mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
return fid_family->end_index - fid_family->start_index + 1;
}
-static u16
-mlxsw_sp_fid_family_pgt_size(const struct mlxsw_sp_fid_family *fid_family)
+static int
+mlxsw_sp_fid_8021d_pgt_size(const struct mlxsw_sp_fid_family *fid_family,
+ u16 *p_pgt_size)
{
u16 num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
- return num_fids * fid_family->nr_flood_tables;
+ *p_pgt_size = num_fids * fid_family->flood_profile->nr_flood_tables;
+ return 0;
+}
+
+static unsigned int mlxsw_sp_fid_rfid_port_offset_cff(unsigned int local_port)
+{
+ /* Port 0 is the CPU port. Since we never create RIFs based off that
+ * port, we don't need to count it.
+ */
+ return WARN_ON_ONCE(!local_port) ? 0 : local_port - 1;
+}
+
+static int
+mlxsw_sp_fid_rfid_pgt_size_cff(const struct mlxsw_sp_fid_family *fid_family,
+ u16 *p_pgt_size)
+{
+ struct mlxsw_core *core = fid_family->mlxsw_sp->core;
+ unsigned int max_ports;
+ u16 pgt_size;
+ u16 max_lags;
+ int err;
+
+ max_ports = mlxsw_core_max_ports(core);
+
+ err = mlxsw_core_max_lag(core, &max_lags);
+ if (err)
+ return err;
+
+ pgt_size = (mlxsw_sp_fid_rfid_port_offset_cff(max_ports) + max_lags) *
+ fid_family->flood_profile->nr_flood_tables;
+ *p_pgt_size = pgt_size;
+ return 0;
}
static u16
-mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
- const struct mlxsw_sp_flood_table *flood_table,
- u16 fid_offset)
+mlxsw_sp_fid_pgt_base_ctl(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
{
u16 num_fids;
num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
- return fid_family->pgt_base + num_fids * flood_table->table_index +
- fid_offset;
+ return fid_family->pgt_base + num_fids * flood_table->table_index;
+}
+
+static u16
+mlxsw_sp_fid_fid_mid_ctl(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ return mlxsw_sp_fid_pgt_base_ctl(fid->fid_family, flood_table) +
+ fid->fid_offset;
}
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
@@ -348,15 +438,14 @@ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_flood_table *flood_table;
u16 mid_index;
- if (WARN_ON(!fid_family->flood_tables))
+ if (WARN_ON(!fid_family->flood_profile))
return -EINVAL;
flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
if (!flood_table)
return -ESRCH;
- mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
- fid->fid_offset);
+ mid_index = fid_family->ops->fid_mid(fid, flood_table);
return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
fid->fid_index, local_port, member);
}
@@ -410,12 +499,13 @@ u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid)
return mlxsw_sp_fid_8021q_fid(fid)->vid;
}
-static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
+static int mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
{
u16 vid = *(u16 *) arg;
mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
+ return 0;
}
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
@@ -424,18 +514,76 @@ static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
MLXSW_REG_SFMR_OP_DESTROY_FID;
}
-static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
+static void mlxsw_sp_fid_pack(char *sfmr_pl,
+ const struct mlxsw_sp_fid *fid,
+ enum mlxsw_reg_sfmr_op op)
{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
u16 smpe;
smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
- mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
- fid->fid_offset, fid->fid_family->flood_rsp,
- fid->fid_family->bridge_type,
+ mlxsw_reg_sfmr_pack(sfmr_pl, op, fid->fid_index,
fid->fid_family->smpe_index_valid, smpe);
+}
+
+static void mlxsw_sp_fid_pack_ctl(char *sfmr_pl,
+ const struct mlxsw_sp_fid *fid,
+ enum mlxsw_reg_sfmr_op op)
+{
+ mlxsw_sp_fid_pack(sfmr_pl, fid, op);
+ mlxsw_reg_sfmr_fid_offset_set(sfmr_pl, fid->fid_offset);
+ mlxsw_reg_sfmr_flood_rsp_set(sfmr_pl, fid->fid_family->flood_rsp);
+ mlxsw_reg_sfmr_flood_bridge_type_set(sfmr_pl,
+ fid->fid_family->bridge_type);
+}
+
+static u16
+mlxsw_sp_fid_off_pgt_base_cff(const struct mlxsw_sp_fid_family *fid_family,
+ u16 fid_offset)
+{
+ return fid_family->pgt_base +
+ fid_offset * fid_family->flood_profile->nr_flood_tables;
+}
+
+static u16 mlxsw_sp_fid_pgt_base_cff(const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_off_pgt_base_cff(fid->fid_family, fid->fid_offset);
+}
+
+static void mlxsw_sp_fid_fid_pack_cff(char *sfmr_pl,
+ const struct mlxsw_sp_fid *fid,
+ enum mlxsw_reg_sfmr_op op)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 pgt_base = mlxsw_sp_fid_pgt_base_cff(fid);
+
+ mlxsw_sp_fid_pack(sfmr_pl, fid, op);
+ mlxsw_reg_sfmr_cff_mid_base_set(sfmr_pl, pgt_base);
+ mlxsw_reg_sfmr_cff_prf_id_set(sfmr_pl,
+ fid_family->flood_profile->profile_id);
+ mlxsw_reg_sfmr_nve_flood_prf_id_set(sfmr_pl,
+ MLXSW_SP_FID_FLOOD_PROFILE_ID_NVE);
+}
+
+static u16 mlxsw_sp_fid_rfid_fid_offset_cff(struct mlxsw_sp *mlxsw_sp,
+ u16 port_lag_id, bool is_lag)
+{
+ u16 max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ if (is_lag)
+ return mlxsw_sp_fid_rfid_port_offset_cff(max_ports) +
+ port_lag_id;
+ else
+ return mlxsw_sp_fid_rfid_port_offset_cff(port_lag_id);
+}
+
+static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ fid->fid_family->ops->fid_pack(sfmr_pl, fid,
+ mlxsw_sp_sfmr_op(valid));
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
@@ -444,15 +592,10 @@ static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
- u16 smpe;
- smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+ fid->fid_family->ops->fid_pack(sfmr_pl, fid,
+ MLXSW_REG_SFMR_OP_CREATE_FID);
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
- fid->fid_index, fid->fid_offset,
- fid->fid_family->flood_rsp,
- fid->fid_family->bridge_type,
- fid->fid_family->smpe_index_valid, smpe);
mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
@@ -768,12 +911,13 @@ mlxsw_sp_fid_8021d_fid(const struct mlxsw_sp_fid *fid)
return container_of(fid, struct mlxsw_sp_fid_8021d, common);
}
-static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
+static int mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
{
int br_ifindex = *(int *) arg;
mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
+ return 0;
}
static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
@@ -1058,7 +1202,37 @@ mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
return 0;
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
+static int
+mlxsw_sp_fid_flood_table_init_ctl(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ const int *sfgc_packet_types;
+ u16 mid_base;
+ int err, i;
+
+ mid_base = mlxsw_sp_fid_pgt_base_ctl(fid_family, flood_table);
+
+ sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
+ for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+
+ if (!sfgc_packet_types[i])
+ continue;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
+ flood_table->table_type, 0, mid_base);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops_ctl = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
@@ -1072,6 +1246,36 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
.vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
+ .flood_table_init = mlxsw_sp_fid_flood_table_init_ctl,
+ .pgt_size = mlxsw_sp_fid_8021d_pgt_size,
+ .fid_mid = mlxsw_sp_fid_fid_mid_ctl,
+ .fid_pack = mlxsw_sp_fid_pack_ctl,
+};
+
+static u16
+mlxsw_sp_fid_fid_mid_cff(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ return mlxsw_sp_fid_pgt_base_cff(fid) + flood_table->table_index;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops_cff = {
+ .setup = mlxsw_sp_fid_8021d_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021d_compare,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
+ .pgt_size = mlxsw_sp_fid_8021d_pgt_size,
+ .fid_mid = mlxsw_sp_fid_fid_mid_cff,
+ .fid_pack = mlxsw_sp_fid_fid_pack_cff,
};
#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
@@ -1095,6 +1299,45 @@ static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
},
};
+static const
+struct mlxsw_sp_fid_flood_profile mlxsw_sp_fid_8021d_flood_profile = {
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .profile_id = MLXSW_SP_FID_FLOOD_PROFILE_ID_BRIDGE,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_rsp_flood_tables_cff[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_NOT_UC,
+ .table_index = 1,
+ },
+};
+
+static const
+struct mlxsw_sp_fid_flood_profile mlxsw_sp_fid_rsp_flood_profile_cff = {
+ .flood_tables = mlxsw_sp_fid_rsp_flood_tables_cff,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_rsp_flood_tables_cff),
+ .profile_id = MLXSW_SP_FID_FLOOD_PROFILE_ID_RSP,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_nve_flood_tables_cff[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_ANY,
+ .table_index = 0,
+ },
+};
+
+static const
+struct mlxsw_sp_fid_flood_profile mlxsw_sp_fid_nve_flood_profile_cff = {
+ .flood_tables = mlxsw_sp_fid_nve_flood_tables_cff,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_nve_flood_tables_cff),
+ .profile_id = MLXSW_SP_FID_FLOOD_PROFILE_ID_NVE,
+};
+
static bool
mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
{
@@ -1110,9 +1353,35 @@ mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
}
-static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
+static int mlxsw_sp_fid_rfid_setup_ctl(struct mlxsw_sp_fid *fid,
+ const void *arg)
{
+ /* In controlled mode, the FW takes care of FID placement. */
fid->fid_offset = 0;
+ return 0;
+}
+
+static int mlxsw_sp_fid_rfid_setup_cff(struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u16 rif_index = *(const u16 *)arg;
+ struct mlxsw_sp_rif *rif;
+ bool is_lag;
+ u16 port;
+ int err;
+
+ rif = mlxsw_sp_rif_by_index(mlxsw_sp, rif_index);
+ if (!rif)
+ return -ENOENT;
+
+ err = mlxsw_sp_rif_subport_port(rif, &port, &is_lag);
+ if (err)
+ return err;
+
+ fid->fid_offset = mlxsw_sp_fid_rfid_fid_offset_cff(mlxsw_sp, port,
+ is_lag);
+ return 0;
}
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
@@ -1238,8 +1507,8 @@ mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
return 0;
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
- .setup = mlxsw_sp_fid_rfid_setup,
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops_ctl = {
+ .setup = mlxsw_sp_fid_rfid_setup_ctl,
.configure = mlxsw_sp_fid_rfid_configure,
.deconfigure = mlxsw_sp_fid_rfid_deconfigure,
.index_alloc = mlxsw_sp_fid_rfid_index_alloc,
@@ -1251,11 +1520,146 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
.nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
.vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
+ .fid_pack = mlxsw_sp_fid_pack_ctl,
+};
+
+static int
+mlxsw_sp_fid_rfid_port_add_cff(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 pgt_addr, u16 smpe, unsigned int local_port)
+{
+ int err;
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe,
+ local_port, true);
+ if (err)
+ return err;
+
+ if (flood_table->packet_type == MLXSW_SP_FLOOD_TYPE_NOT_UC) {
+ u16 router_port = mlxsw_sp_router_port(mlxsw_sp);
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe,
+ router_port, true);
+ if (err)
+ goto err_entry_port_set;
+ }
+
+ return 0;
+
+err_entry_port_set:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe, local_port,
+ false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_rfid_port_del_cff(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 pgt_addr, u16 smpe, u16 local_port)
+{
+ if (flood_table->packet_type == MLXSW_SP_FLOOD_TYPE_NOT_UC) {
+ u16 router_port = mlxsw_sp_router_port(mlxsw_sp);
+
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe,
+ router_port, false);
+ }
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe, local_port,
+ false);
+}
+
+static int
+mlxsw_sp_fid_rfid_port_memb_ft_cff(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table,
+ const struct mlxsw_sp_port *mlxsw_sp_port,
+ bool member)
+{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ u16 fid_pgt_base;
+ u16 fid_offset;
+ u16 pgt_addr;
+ u16 smpe;
+ u16 port;
+
+ /* In-PGT SMPE is only valid on Spectrum-1, CFF only on Spectrum>1. */
+ smpe = 0;
+
+ port = mlxsw_sp_port->lagged ? mlxsw_sp_port->lag_id : local_port;
+ fid_offset = mlxsw_sp_fid_rfid_fid_offset_cff(mlxsw_sp, port,
+ mlxsw_sp_port->lagged);
+ fid_pgt_base = mlxsw_sp_fid_off_pgt_base_cff(fid_family, fid_offset);
+ pgt_addr = fid_pgt_base + flood_table->table_index;
+
+ if (member)
+ return mlxsw_sp_fid_rfid_port_add_cff(mlxsw_sp, flood_table,
+ pgt_addr, smpe,
+ local_port);
+
+ mlxsw_sp_fid_rfid_port_del_cff(mlxsw_sp, flood_table, pgt_addr, smpe,
+ local_port);
+ return 0;
+}
+
+static int
+mlxsw_sp_fid_rfid_port_memb_cff(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port,
+ bool member)
+{
+ int i;
+
+ for (i = 0; i < fid_family->flood_profile->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table =
+ &fid_family->flood_profile->flood_tables[i];
+ int err;
+
+ err = mlxsw_sp_fid_rfid_port_memb_ft_cff(fid_family,
+ flood_table,
+ mlxsw_sp_port, member);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_fid_rfid_port_init_cff(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_fid_rfid_port_memb_cff(fid_family, mlxsw_sp_port, true);
+}
+
+static void
+mlxsw_sp_fid_rfid_port_fini_cff(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_fid_rfid_port_memb_cff(fid_family, mlxsw_sp_port, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops_cff = {
+ .setup = mlxsw_sp_fid_rfid_setup_cff,
+ .configure = mlxsw_sp_fid_rfid_configure,
+ .deconfigure = mlxsw_sp_fid_rfid_deconfigure,
+ .index_alloc = mlxsw_sp_fid_rfid_index_alloc,
+ .compare = mlxsw_sp_fid_rfid_compare,
+ .port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_rfid_vni_set,
+ .vni_clear = mlxsw_sp_fid_rfid_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
+ .pgt_size = mlxsw_sp_fid_rfid_pgt_size_cff,
+ .fid_port_init = mlxsw_sp_fid_rfid_port_init_cff,
+ .fid_port_fini = mlxsw_sp_fid_rfid_port_fini_cff,
+ .fid_mid = mlxsw_sp_fid_fid_mid_cff,
+ .fid_pack = mlxsw_sp_fid_fid_pack_cff,
};
-static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
+static int mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
{
fid->fid_offset = 0;
+ return 0;
}
static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
@@ -1312,6 +1716,7 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
.vni_clear = mlxsw_sp_fid_dummy_vni_clear,
.nve_flood_index_set = mlxsw_sp_fid_dummy_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
+ .fid_pack = mlxsw_sp_fid_pack,
};
static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
@@ -1395,7 +1800,7 @@ mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
__mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops_ctl = {
.setup = mlxsw_sp_fid_8021q_setup,
.configure = mlxsw_sp_fid_8021q_configure,
.deconfigure = mlxsw_sp_fid_8021q_deconfigure,
@@ -1409,6 +1814,29 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
.vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+ .flood_table_init = mlxsw_sp_fid_flood_table_init_ctl,
+ .pgt_size = mlxsw_sp_fid_8021d_pgt_size,
+ .fid_mid = mlxsw_sp_fid_fid_mid_ctl,
+ .fid_pack = mlxsw_sp_fid_pack_ctl,
+};
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops_cff = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+ .pgt_size = mlxsw_sp_fid_8021d_pgt_size,
+ .fid_mid = mlxsw_sp_fid_fid_mid_cff,
+ .fid_pack = mlxsw_sp_fid_fid_pack_cff,
};
/* There are 4K-2 802.1Q FIDs */
@@ -1434,10 +1862,9 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
.fid_size = sizeof(struct mlxsw_sp_fid_8021q),
.start_index = MLXSW_SP_FID_8021Q_START,
.end_index = MLXSW_SP_FID_8021Q_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
.rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_ops,
+ .ops = &mlxsw_sp_fid_8021q_ops_ctl,
.flood_rsp = false,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
.smpe_index_valid = false,
@@ -1448,10 +1875,9 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
.fid_size = sizeof(struct mlxsw_sp_fid_8021d),
.start_index = MLXSW_SP_FID_8021D_START,
.end_index = MLXSW_SP_FID_8021D_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
.rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
+ .ops = &mlxsw_sp_fid_8021d_ops_ctl,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
.smpe_index_valid = false,
};
@@ -1465,47 +1891,45 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
.smpe_index_valid = false,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family_ctl = {
.type = MLXSW_SP_FID_TYPE_RFID,
.fid_size = sizeof(struct mlxsw_sp_fid),
.start_index = MLXSW_SP_RFID_START,
.end_index = MLXSW_SP_RFID_END,
.rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
- .ops = &mlxsw_sp_fid_rfid_ops,
+ .ops = &mlxsw_sp_fid_rfid_ops_ctl,
.flood_rsp = true,
.smpe_index_valid = false,
};
-const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
+static const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
[MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
- [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family_ctl,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family_ctl = {
.type = MLXSW_SP_FID_TYPE_8021Q,
.fid_size = sizeof(struct mlxsw_sp_fid_8021q),
.start_index = MLXSW_SP_FID_8021Q_START,
.end_index = MLXSW_SP_FID_8021Q_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
.rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_ops,
+ .ops = &mlxsw_sp_fid_8021q_ops_ctl,
.flood_rsp = false,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
.smpe_index_valid = true,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family_ctl = {
.type = MLXSW_SP_FID_TYPE_8021D,
.fid_size = sizeof(struct mlxsw_sp_fid_8021d),
.start_index = MLXSW_SP_FID_8021D_START,
.end_index = MLXSW_SP_FID_8021D_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
.rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
+ .ops = &mlxsw_sp_fid_8021d_ops_ctl,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
.smpe_index_valid = true,
};
@@ -1519,11 +1943,51 @@ static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
.smpe_index_valid = false,
};
-const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
- [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
+static const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr_ctl[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family_ctl,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family_ctl,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
- [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family_ctl,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family_cff = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops_cff,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family_cff = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops_cff,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family_cff = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_START,
+ .end_index = MLXSW_SP_RFID_END,
+ .flood_profile = &mlxsw_sp_fid_rsp_flood_profile_cff,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops_cff,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr_cff[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family_cff,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family_cff,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family_cff,
};
static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
@@ -1571,7 +2035,9 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid->fid_index = fid_index;
__set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
- fid->fid_family->ops->setup(fid, arg);
+ err = fid->fid_family->ops->setup(fid, arg);
+ if (err)
+ goto err_setup;
err = fid->fid_family->ops->configure(fid);
if (err)
@@ -1589,6 +2055,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
err_rhashtable_insert:
fid->fid_family->ops->deconfigure(fid);
err_configure:
+err_setup:
__clear_bit(fid_index - fid_family->start_index,
fid_family->fids_bitmap);
err_index_alloc:
@@ -1650,36 +2117,6 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp)
}
static int
-mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
- const struct mlxsw_sp_flood_table *flood_table)
-{
- enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
- struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- const int *sfgc_packet_types;
- u16 mid_base;
- int err, i;
-
- mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
-
- sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
- for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
- char sfgc_pl[MLXSW_REG_SFGC_LEN];
-
- if (!sfgc_packet_types[i])
- continue;
-
- mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
- flood_table->table_type, 0, mid_base);
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int
mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
{
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
@@ -1687,22 +2124,28 @@ mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
int err;
int i;
- if (!fid_family->nr_flood_tables)
- return 0;
+ err = fid_family->ops->pgt_size(fid_family, &pgt_size);
+ if (err)
+ return err;
- pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family);
err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &fid_family->pgt_base,
pgt_size);
if (err)
return err;
- for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ if (!fid_family->flood_profile)
+ return 0;
+
+ for (i = 0; i < fid_family->flood_profile->nr_flood_tables; i++) {
const struct mlxsw_sp_flood_table *flood_table;
- flood_table = &fid_family->flood_tables[i];
- err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table);
- if (err)
- goto err_flood_table_init;
+ flood_table = &fid_family->flood_profile->flood_tables[i];
+ if (fid_family->ops->flood_table_init) {
+ err = fid_family->ops->flood_table_init(fid_family,
+ flood_table);
+ if (err)
+ goto err_flood_table_init;
+ }
}
return 0;
@@ -1717,11 +2160,12 @@ mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
{
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
u16 pgt_size;
+ int err;
- if (!fid_family->nr_flood_tables)
+ err = fid_family->ops->pgt_size(fid_family, &pgt_size);
+ if (WARN_ON_ONCE(err))
return;
- pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family);
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, fid_family->pgt_base, pgt_size);
}
@@ -1744,7 +2188,7 @@ static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
goto err_alloc_fids_bitmap;
}
- if (fid_family->flood_tables) {
+ if (fid_family->flood_profile) {
err = mlxsw_sp_fid_flood_tables_init(fid_family);
if (err)
goto err_fid_flood_tables_init;
@@ -1767,7 +2211,7 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
- if (fid_family->flood_tables)
+ if (fid_family->flood_profile)
mlxsw_sp_fid_flood_tables_fini(fid_family);
bitmap_free(fid_family->fids_bitmap);
@@ -1775,9 +2219,34 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
kfree(fid_family);
}
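+/* Per-port FID init/fini is delegated to the RFID family's ops; the hooks
+ * are optional and only invoked when the family provides them.
+ */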
+static int mlxsw_sp_fid_port_init(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ const enum mlxsw_sp_fid_type type_rfid = MLXSW_SP_FID_TYPE_RFID;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_fid_family *rfid_family;
+
+ rfid_family = mlxsw_sp->fid_core->fid_family_arr[type_rfid];
+ if (rfid_family->ops->fid_port_init)
+ return rfid_family->ops->fid_port_init(rfid_family,
+ mlxsw_sp_port);
+ return 0;
+}
+
+static void mlxsw_sp_fid_port_fini(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ const enum mlxsw_sp_fid_type type_rfid = MLXSW_SP_FID_TYPE_RFID;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_fid_family *rfid_family;
+
+ rfid_family = mlxsw_sp->fid_core->fid_family_arr[type_rfid];
+ if (rfid_family->ops->fid_port_fini)
+ rfid_family->ops->fid_port_fini(rfid_family, mlxsw_sp_port);
+}
+
int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err;
/* Track number of FIDs configured on the port with mapping type
* PORT_VID_TO_FID, so that we know when to transition the port
@@ -1785,17 +2254,42 @@ int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port)
*/
mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
- return mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ err = mlxsw_sp_fid_port_init(mlxsw_sp_port);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ if (err)
+ goto err_vp_mode_set;
+
+ return 0;
+
+err_vp_mode_set:
+ mlxsw_sp_fid_port_fini(mlxsw_sp_port);
+ return err;
}
void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ mlxsw_sp_fid_port_fini(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
}
-int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_fid_port_join_lag(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_fid_port_init(mlxsw_sp_port);
+}
+
+void mlxsw_sp_fid_port_leave_lag(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_fid_port_fini(mlxsw_sp_port);
+}
+
+static int
+mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid_family *fid_family_arr[])
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_fid_core *fid_core;
@@ -1822,8 +2316,7 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
}
for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
- err = mlxsw_sp_fid_family_register(mlxsw_sp,
- mlxsw_sp->fid_family_arr[i]);
+ err = mlxsw_sp_fid_family_register(mlxsw_sp, fid_family_arr[i]);
if (err)
goto err_fid_ops_register;
@@ -1848,7 +2341,7 @@ err_rhashtable_fid_init:
return err;
}
-void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
int i;
@@ -1861,3 +2354,143 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&fid_core->fid_ht);
kfree(fid_core);
}
+
+static int mlxsw_sp1_fids_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fids_init(mlxsw_sp, mlxsw_sp1_fid_family_arr);
+}
+
+const struct mlxsw_sp_fid_core_ops mlxsw_sp1_fid_core_ops = {
+ .init = mlxsw_sp1_fids_init,
+ .fini = mlxsw_sp_fids_fini,
+};
+
+static int mlxsw_sp_fid_check_flood_profile_id(struct mlxsw_sp *mlxsw_sp,
+ int profile_id)
+{
+ u32 max_profiles;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_FLOOD_PRF))
+ return -EIO;
+
+ max_profiles = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_FLOOD_PRF);
+ if (WARN_ON_ONCE(!profile_id) ||
+ WARN_ON_ONCE(profile_id >= max_profiles))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mlxsw_sp2_fids_init_flood_table(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_flood_profile_id profile_id,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ const int *sfgc_packet_types;
+ int err;
+ int i;
+
+ sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
+ for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
+ char sffp_pl[MLXSW_REG_SFFP_LEN];
+
+ if (!sfgc_packet_types[i])
+ continue;
+
+ mlxsw_reg_sffp_pack(sffp_pl, profile_id, i,
+ flood_table->table_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sffp), sffp_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp2_fids_init_flood_profile(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid_flood_profile *
+ flood_profile)
+{
+ int err;
+ int i;
+
+ err = mlxsw_sp_fid_check_flood_profile_id(mlxsw_sp,
+ flood_profile->profile_id);
+ if (err)
+ return err;
+
+ for (i = 0; i < flood_profile->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &flood_profile->flood_tables[i];
+ err = mlxsw_sp2_fids_init_flood_table(mlxsw_sp,
+ flood_profile->profile_id,
+ flood_table);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const
+struct mlxsw_sp_fid_flood_profile *mlxsw_sp_fid_flood_profiles[] = {
+ &mlxsw_sp_fid_8021d_flood_profile,
+ &mlxsw_sp_fid_rsp_flood_profile_cff,
+ &mlxsw_sp_fid_nve_flood_profile_cff,
+};
+
+static int
+mlxsw_sp2_fids_init_flood_profiles(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_fid_flood_profiles); i++) {
+ const struct mlxsw_sp_fid_flood_profile *flood_profile;
+
+ flood_profile = mlxsw_sp_fid_flood_profiles[i];
+ err = mlxsw_sp2_fids_init_flood_profile(mlxsw_sp,
+ flood_profile);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp2_fids_init_ctl(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fids_init(mlxsw_sp, mlxsw_sp2_fid_family_arr_ctl);
+}
+
+static int mlxsw_sp2_fids_init_cff(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp2_fids_init_flood_profiles(mlxsw_sp);
+ if (err)
+ return err;
+
+ return mlxsw_sp_fids_init(mlxsw_sp, mlxsw_sp2_fid_family_arr_cff);
+}
+
+static int mlxsw_sp2_fids_init(struct mlxsw_sp *mlxsw_sp)
+{
+ switch (mlxsw_core_flood_mode(mlxsw_sp->core)) {
+ case MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED:
+ return mlxsw_sp2_fids_init_ctl(mlxsw_sp);
+ case MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF:
+ return mlxsw_sp2_fids_init_cff(mlxsw_sp);
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+}
+
+const struct mlxsw_sp_fid_core_ops mlxsw_sp2_fid_core_ops = {
+ .init = mlxsw_sp2_fids_init,
+ .fini = mlxsw_sp_fids_fini,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 82a95125d9ca..7164f9e6370f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8419,6 +8419,9 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->ops = ops;
rif->rif_entries = rif_entries;
+ if (ops->setup)
+ ops->setup(rif, params);
+
if (ops->fid_get) {
fid = ops->fid_get(rif, params, extack);
if (IS_ERR(fid)) {
@@ -8428,9 +8431,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->fid = fid;
}
- if (ops->setup)
- ops->setup(rif, params);
-
err = ops->configure(rif, extack);
if (err)
goto err_configure;
@@ -8660,6 +8660,20 @@ mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
return container_of(rif, struct mlxsw_sp_rif_subport, common);
}
+int mlxsw_sp_rif_subport_port(const struct mlxsw_sp_rif *rif,
+ u16 *port, bool *is_lag)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ if (WARN_ON(rif->ops->type != MLXSW_SP_RIF_TYPE_SUBPORT))
+ return -EINVAL;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ *is_lag = rif_subport->lag;
+ *port = *is_lag ? rif_subport->lag_id : rif_subport->system_port;
+ return 0;
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -11458,6 +11472,13 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_netevent_notifier;
+ mlxsw_sp->router->netdevice_nb.notifier_call =
+ mlxsw_sp_router_netdevice_event;
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->netdevice_nb);
+ if (err)
+ goto err_register_netdev_notifier;
+
mlxsw_sp->router->nexthop_nb.notifier_call =
mlxsw_sp_nexthop_obj_event;
err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
@@ -11473,22 +11494,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_fib_notifier;
- mlxsw_sp->router->netdevice_nb.notifier_call =
- mlxsw_sp_router_netdevice_event;
- err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
- &mlxsw_sp->router->netdevice_nb);
- if (err)
- goto err_register_netdev_notifier;
-
return 0;
-err_register_netdev_notifier:
- unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
- &mlxsw_sp->router->fib_nb);
err_register_fib_notifier:
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &router->netdevice_nb);
+err_register_netdev_notifier:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
@@ -11536,11 +11550,11 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_router *router = mlxsw_sp->router;
- unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
- &router->netdevice_nb);
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
&router->nexthop_nb);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &router->netdevice_nb);
unregister_netevent_notifier(&router->netevent_nb);
unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 88e26c120b48..54f2eac11a63 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -156,7 +156,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
- if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX) {
msg = &kss->spi_msg2;
xfer = kss->spi_xfer2;
@@ -180,7 +180,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
ret = spi_sync(kss->spidev, msg);
if (ret < 0)
netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
+ else if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
memcpy(rxb, trx, rxl);
else
memcpy(rxb, trx + 2, rxl);
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 6961cfc55fb9..a2b3f4433ca8 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -934,11 +934,11 @@ static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev)
}
static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
- u32 *indir, u8 *key, u8 *hfunc)
+ struct ethtool_rxfh_param *rxfh)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- if (indir) {
+ if (rxfh->indir) {
int dw_index;
int byte_index = 0;
@@ -947,17 +947,17 @@ static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
lan743x_csr_read(adapter, RFE_INDX(dw_index));
byte_index = dw_index << 2;
- indir[byte_index + 0] =
+ rxfh->indir[byte_index + 0] =
((four_entries >> 0) & 0x000000FF);
- indir[byte_index + 1] =
+ rxfh->indir[byte_index + 1] =
((four_entries >> 8) & 0x000000FF);
- indir[byte_index + 2] =
+ rxfh->indir[byte_index + 2] =
((four_entries >> 16) & 0x000000FF);
- indir[byte_index + 3] =
+ rxfh->indir[byte_index + 3] =
((four_entries >> 24) & 0x000000FF);
}
}
- if (key) {
+ if (rxfh->key) {
int dword_index;
int byte_index = 0;
@@ -967,28 +967,30 @@ static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
RFE_HASH_KEY(dword_index));
byte_index = dword_index << 2;
- key[byte_index + 0] =
+ rxfh->key[byte_index + 0] =
((four_entries >> 0) & 0x000000FF);
- key[byte_index + 1] =
+ rxfh->key[byte_index + 1] =
((four_entries >> 8) & 0x000000FF);
- key[byte_index + 2] =
+ rxfh->key[byte_index + 2] =
((four_entries >> 16) & 0x000000FF);
- key[byte_index + 3] =
+ rxfh->key[byte_index + 3] =
((four_entries >> 24) & 0x000000FF);
}
}
- if (hfunc)
- (*hfunc) = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
- const u32 *indir, const u8 *key,
- const u8 hfunc)
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 *indir = rxfh->indir;
+ u8 *key = rxfh->key;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
@@ -1075,7 +1077,6 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
buf = lan743x_csr_read(adapter, MAC_CR);
if (buf & MAC_CR_EEE_EN_) {
eee->eee_enabled = true;
- eee->eee_active = !!(eee->advertised & eee->lp_advertised);
eee->tx_lpi_enabled = true;
/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index b648461787d2..be79cb0ae5af 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -1075,7 +1075,7 @@ struct lan743x_adapter {
#define DMA_DESCRIPTOR_SPACING_32 (32)
#define DMA_DESCRIPTOR_SPACING_64 (64)
#define DMA_DESCRIPTOR_SPACING_128 (128)
-#define DEFAULT_DMA_DESCRIPTOR_SPACING (L1_CACHE_BYTES)
+#define DEFAULT_DMA_DESCRIPTOR_SPACING (DMA_DESCRIPTOR_SPACING_16)
#define DMAC_CHANNEL_STATE_SET(start_bit, stop_bit) \
(((start_bit) ? 2 : 0) | ((stop_bit) ? 1 : 0))
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
index 41fa2523d91d..5f2cd9a8cf8f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
/* Now, set PGIDs for each active LAG */
for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
- struct net_device *bond = lan966x->ports[lag]->bond;
+ struct lan966x_port *port = lan966x->ports[lag];
int num_active_ports = 0;
+ struct net_device *bond;
unsigned long bond_mask;
u8 aggr_idx[16];
- if (!bond || (visited & BIT(lag)))
+ if (!port || !port->bond || (visited & BIT(lag)))
continue;
+ bond = port->bond;
bond_mask = lan966x_lag_get_mask(lan966x, bond);
for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
struct lan966x_port *port = lan966x->ports[p];
+ if (!port)
+ continue;
+
lan_wr(ANA_PGID_PGID_SET(bond_mask),
lan966x, ANA_PGID(p));
if (port->lag_tx_active)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 92108d354051..2e83bbb9477e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port)
lan966x_taprio_speed_set(port, config->speed);
/* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
- * port speed for QSGMII ports.
+ * port speed for QSGMII or SGMII ports.
*/
- if (phy_interface_num_ports(config->portmode) == 4)
+ if (phy_interface_num_ports(config->portmode) == 4 ||
+ config->portmode == PHY_INTERFACE_MODE_SGMII)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
lan_wr(config->duplex | mode,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 37d2584b48a7..a06dc5a9b355 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1012,7 +1012,7 @@ static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data)
return;
for (idx = 0; idx < sparx5->num_ethtool_stats; idx++)
- ethtool_sprintf(&data, "%s", sparx5->stats_layout[idx]);
+ ethtool_puts(&data, sparx5->stats_layout[idx]);
}
static void sparx5_get_sset_data(struct net_device *ndev,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 4af285918ea2..75868b3f548e 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5,
list) {
if ((vid == 0 || mact_entry->vid == vid) &&
ether_addr_equal(addr, mact_entry->mac)) {
+ sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+
list_del(&mact_entry->list);
devm_kfree(sparx5->dev, mact_entry);
-
- sparx5_mact_forget(sparx5, addr, mact_entry->vid);
}
}
mutex_unlock(&sparx5->mact_lock);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index d1f7fc8b1b71..3c066b62e689 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sparx5);
sparx5->pdev = pdev;
sparx5->dev = &pdev->dev;
+ spin_lock_init(&sparx5->tx_lock);
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 6f565c0c0c3d..316fed5f2735 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -280,6 +280,7 @@ struct sparx5 {
int xtr_irq;
/* Frame DMA */
int fdma_irq;
+ spinlock_t tx_lock; /* lock for frame transmission */
struct sparx5_rx rx;
struct sparx5_tx tx;
/* PTP */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 6db6ac6a3bbc..ac7e1cffbcec 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
ret = sparx5_fdma_xmit(sparx5, ifh, skb);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
+ spin_unlock(&sparx5->tx_lock);
if (ret == -EBUSY)
goto busy;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 6367de0c2c2e..d33b27214539 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -158,6 +158,9 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
if (dev_type == GDMA_DEVICE_MANA) {
gc->mana.gdma_context = gc;
gc->mana.dev_id = dev;
+ } else if (dev_type == GDMA_DEVICE_MANA_IB) {
+ gc->mana_ib.dev_id = dev;
+ gc->mana_ib.gdma_context = gc;
}
}
@@ -414,8 +417,12 @@ static void mana_gd_process_eq_events(void *arg)
old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
/* No more entries */
- if (owner_bits == old_bits)
+ if (owner_bits == old_bits) {
+ /* return here without ringing the doorbell */
+ if (i == 0)
+ return;
break;
+ }
new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
if (owner_bits != new_bits) {
@@ -445,42 +452,29 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
- struct gdma_resource *r;
unsigned int msi_index;
unsigned long flags;
struct device *dev;
int err = 0;
gc = gd->gdma_context;
- r = &gc->msix_resource;
dev = gc->dev;
+ msi_index = spec->eq.msix_index;
- spin_lock_irqsave(&r->lock, flags);
-
- msi_index = find_first_zero_bit(r->map, r->size);
- if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
+ if (msi_index >= gc->num_msix_usable) {
err = -ENOSPC;
- } else {
- bitmap_set(r->map, msi_index, 1);
- queue->eq.msix_index = msi_index;
- }
-
- spin_unlock_irqrestore(&r->lock, flags);
-
- if (err) {
- dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
- err, msi_index, r->size, gc->num_msix_usable);
+ dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
+ err, msi_index, gc->num_msix_usable);
return err;
}
+ queue->eq.msix_index = msi_index;
gic = &gc->irq_contexts[msi_index];
- WARN_ON(gic->handler || gic->arg);
-
- gic->arg = queue;
-
- gic->handler = mana_gd_process_eq_events;
+ spin_lock_irqsave(&gic->lock, flags);
+ list_add_rcu(&queue->entry, &gic->eq_list);
+ spin_unlock_irqrestore(&gic->lock, flags);
return 0;
}
@@ -490,12 +484,11 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
- struct gdma_resource *r;
unsigned int msix_index;
unsigned long flags;
+ struct gdma_queue *eq;
gc = gd->gdma_context;
- r = &gc->msix_resource;
/* At most num_online_cpus() + 1 interrupts are used. */
msix_index = queue->eq.msix_index;
@@ -503,14 +496,17 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
return;
gic = &gc->irq_contexts[msix_index];
- gic->handler = NULL;
- gic->arg = NULL;
-
- spin_lock_irqsave(&r->lock, flags);
- bitmap_clear(r->map, msix_index, 1);
- spin_unlock_irqrestore(&r->lock, flags);
+ spin_lock_irqsave(&gic->lock, flags);
+ list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
+ if (queue == eq) {
+ list_del_rcu(&eq->entry);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gic->lock, flags);
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+ synchronize_rcu();
}
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
@@ -588,6 +584,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
int err;
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+ queue->id = INVALID_QUEUE_ID;
log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
@@ -819,6 +816,7 @@ free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
@@ -895,6 +893,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_free_memory(gmi);
kfree(queue);
}
+EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);
int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
@@ -971,6 +970,7 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_register_device, NET_MANA);
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -1001,6 +1001,7 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_deregister_device, NET_MANA);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1217,9 +1218,14 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
struct gdma_irq_context *gic = arg;
+ struct list_head *eq_list = &gic->eq_list;
+ struct gdma_queue *eq;
- if (gic->handler)
- gic->handler(gic->arg);
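+ /* Walk every EQ attached to this interrupt under RCU; the list is
+  * updated with list_add_rcu()/list_del_rcu() at (de)registration time.
+  */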
+ rcu_read_lock();
+ list_for_each_entry_rcu(eq, eq_list, entry) {
+ gic->handler(eq);
+ }
+ rcu_read_unlock();
return IRQ_HANDLED;
}
@@ -1271,8 +1277,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
for (i = 0; i < nvec; i++) {
gic = &gc->irq_contexts[i];
- gic->handler = NULL;
- gic->arg = NULL;
+ gic->handler = mana_gd_process_eq_events;
+ INIT_LIST_HEAD(&gic->eq_list);
+ spin_lock_init(&gic->lock);
if (!i)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
@@ -1295,10 +1302,6 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
- err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
- if (err)
- goto free_irq;
-
gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;
@@ -1329,8 +1332,6 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (gc->max_num_msix < 1)
return;
- mana_gd_free_res_map(&gc->msix_resource);
-
for (i = 0; i < gc->max_num_msix; i++) {
irq = pci_irq_vector(pdev, i);
if (irq < 0)
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 9d1cd3bfcf66..2729a2c5acf9 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -300,6 +300,7 @@ static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
spec.eq.context = ctx;
spec.eq.callback = cb;
spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
+ spec.eq.msix_index = 0;
return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index fc3d2903a80f..59287c6e6cee 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1244,6 +1244,7 @@ static int mana_create_eq(struct mana_context *ac)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
for (i = 0; i < gc->max_num_queues; i++) {
+ spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
if (err)
goto out;
@@ -2137,6 +2138,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
pprm.pool_size = RX_BUFFERS_PER_QUEUE;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
+ pprm.netdev = rxq->ndev;
rxq->page_pool = page_pool_create(&pprm);
@@ -2385,13 +2387,33 @@ void mana_query_gf_stats(struct mana_port_context *apc)
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
sizeof(req), sizeof(resp));
- req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
+ req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
+ STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
+ STATISTICS_FLAGS_HC_RX_BYTES |
+ STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
+ STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
+ STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
+ STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
+ STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
+ STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
+ STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
+ STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
+ STATISTICS_FLAGS_HC_TX_BYTES |
STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
- STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
+ STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
+ STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
@@ -2407,6 +2429,30 @@ void mana_query_gf_stats(struct mana_port_context *apc)
return;
}
+ apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
+ apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
+ apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
+ apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
+ apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
+ apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
+ apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
+ apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
+ apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
+ apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
+ apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
+ apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
+ resp.tx_err_inval_vport_offset_pkt;
+ apc->eth_stats.hc_tx_err_vlan_enforcement =
+ resp.tx_err_vlan_enforcement;
+ apc->eth_stats.hc_tx_err_eth_type_enforcement =
+ resp.tx_err_ethtype_enforcement;
+ apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
+ apc->eth_stats.hc_tx_err_sqpdid_enforcement =
+ resp.tx_err_SQPDID_enforcement;
+ apc->eth_stats.hc_tx_err_cqpdid_enforcement =
+ resp.tx_err_CQPDID_enforcement;
+ apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
+ apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
@@ -2414,6 +2460,7 @@ void mana_query_gf_stats(struct mana_port_context *apc)
apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+ apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
}
static int mana_init_port(struct net_device *ndev)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 607150165ab4..ab2413d71f6c 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,6 +13,46 @@ static const struct {
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+ {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
+ hc_rx_discards_no_wqe)},
+ {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ hc_rx_err_vport_disabled)},
+ {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
+ {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_ucast_pkts)},
+ {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_ucast_bytes)},
+ {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_bcast_pkts)},
+ {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_bcast_bytes)},
+ {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_mcast_pkts)},
+ {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_mcast_bytes)},
+ {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_gf_disabled)},
+ {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_vport_disabled)},
+ {"hc_tx_err_inval_vportoffset_pkt",
+ offsetof(struct mana_ethtool_stats,
+ hc_tx_err_inval_vportoffset_pkt)},
+ {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_vlan_enforcement)},
+ {"hc_tx_err_eth_type_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
+ {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_sa_enforcement)},
+ {"hc_tx_err_sqpdid_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
+ {"hc_tx_err_cqpdid_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
+ {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_mtu_violation)},
+ {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_inval_oob)},
+ {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_gdma)},
{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
hc_tx_ucast_pkts)},
@@ -208,28 +248,28 @@ static u32 mana_rss_indir_size(struct net_device *ndev)
return MANA_INDIRECT_TABLE_SIZE;
}
-static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int mana_get_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mana_port_context *apc = netdev_priv(ndev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- indir[i] = apc->indir_table[i];
+ rxfh->indir[i] = apc->indir_table[i];
}
- if (key)
- memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);
+ if (rxfh->key)
+ memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);
return 0;
}
-static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int mana_set_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mana_port_context *apc = netdev_priv(ndev);
bool update_hash = false, update_table = false;
@@ -240,25 +280,26 @@ static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
if (!apc->port_is_up)
return -EOPNOTSUPP;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- if (indir[i] >= apc->num_queues)
+ if (rxfh->indir[i] >= apc->num_queues)
return -EINVAL;
update_table = true;
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
save_table[i] = apc->indir_table[i];
- apc->indir_table[i] = indir[i];
+ apc->indir_table[i] = rxfh->indir[i];
}
}
- if (key) {
+ if (rxfh->key) {
update_hash = true;
memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
- memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
+ memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
}
err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 61d8bfd12d5f..55408f16fbbc 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -414,6 +414,7 @@ static const u64 fix_mac[] = {
END_SIGN
};
+MODULE_DESCRIPTION("Neterion 10GbE driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 2967bab72505..15180538b80a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1424,10 +1424,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_
mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
return;
+ /* Both struct tcphdr and struct udphdr start with
+ * __be16 source;
+ * __be16 dest;
+ * so we can use the same code for both.
+ */
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
- mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
- mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
+ if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) {
+ mangle_action->mangle.val =
+ (__force u32)cpu_to_be32(mangle_action->mangle.val << 16);
+ /* The mask of mangle action is inverse mask,
+ * so clear the dest tp port with 0xFFFF
+ * instead of using a rotate-left operation.
+ */
+ mangle_action->mangle.mask =
+ (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF);
+ }
+ if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) {
+ mangle_action->mangle.offset = 0;
+ mangle_action->mangle.val =
+ (__force u32)cpu_to_be32(mangle_action->mangle.val);
+ mangle_action->mangle.mask =
+ (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+ }
return;
default:
@@ -1864,10 +1884,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct nfp_fl_ct_flow_entry *ct_entry;
+ struct flow_action_entry *ct_goto;
struct nfp_fl_ct_zone_entry *zt;
+ struct flow_action_entry *act;
bool wildcarded = false;
struct flow_match_ct ct;
- struct flow_action_entry *ct_goto;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_REDIRECT_INGRESS:
+ case FLOW_ACTION_MIRRED:
+ case FLOW_ACTION_MIRRED_INGRESS:
+ if (act->dev->rtnl_link_ops &&
+ !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: out port is openvswitch internal port");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+ }
flow_rule_match_ct(rule, &ct);
if (!ct.mask->ct_zone) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 88d6d992e7d0..361d7c495e2d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -76,7 +76,7 @@ struct nfp_fl_lag_group {
/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID 0
#define NFP_FL_LAG_GROUP_MIN 1 /* ID 0 reserved */
-#define NFP_FL_LAG_GROUP_MAX 32 /* IDs 1 to 31 are valid */
+#define NFP_FL_LAG_GROUP_MAX 31 /* IDs 1 to 31 are valid */
/* wait for more config */
#define NFP_FL_LAG_DELAY (msecs_to_jiffies(2))
@@ -111,8 +111,8 @@ nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
- id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
- NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
+ id = ida_alloc_range(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
+ NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
if (id < 0) {
nfp_flower_cmsg_warn(priv->app,
"No more bonding groups available\n");
@@ -121,7 +121,7 @@ nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
group = kmalloc(sizeof(*group), GFP_KERNEL);
if (!group) {
- ida_simple_remove(&lag->ida_handle, id);
+ ida_free(&lag->ida_handle, id);
return ERR_PTR(-ENOMEM);
}
@@ -328,8 +328,7 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
}
if (entry->to_destroy) {
- ida_simple_remove(&lag->ida_handle,
- entry->group_id);
+ ida_free(&lag->ida_handle, entry->group_id);
list_del(&entry->list);
kfree(entry);
}
@@ -415,7 +414,7 @@ nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
struct nfp_flower_cmsg_lag_config *cmsg_payload;
cmsg_payload = nfp_flower_cmsg_get_data(skb);
- if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
+ if (be32_to_cpu(cmsg_payload->group_id) > NFP_FL_LAG_GROUP_MAX)
return -EINVAL;
/* Drop cmsg retrans if storage limit is exceeded to prevent
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index e522845c7c21..0d7d138d6e0d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
- if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
if (entry->bridge_count ||
!nfp_flower_is_supported_bridge(netdev)) {
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 17381bfc15d7..d215efc6cad0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -74,7 +74,7 @@ static void
nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
{
- u32 l3_offset, l4_offset, hdrlen;
+ u32 l3_offset, l4_offset, hdrlen, l4_hdrlen;
u16 mss;
if (!skb_is_gso(skb))
@@ -83,13 +83,16 @@ nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_tcp_all_headers(skb);
+ l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ sizeof(struct udphdr) : tcp_hdrlen(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_tcp_all_headers(skb);
+ l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ sizeof(struct udphdr) : inner_tcp_hdrlen(skb);
}
+ hdrlen = l4_offset + l4_hdrlen;
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index 8d78c6faefa8..dae5af7d1845 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -40,20 +40,23 @@ static __le64
nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
struct sk_buff *skb)
{
- u32 segs, hdrlen, l3_offset, l4_offset;
+ u32 segs, hdrlen, l3_offset, l4_offset, l4_hdrlen;
struct nfp_nfdk_tx_desc txd;
u16 mss;
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_tcp_all_headers(skb);
+ l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ sizeof(struct udphdr) : tcp_hdrlen(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_tcp_all_headers(skb);
+ l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ sizeof(struct udphdr) : inner_tcp_hdrlen(skb);
}
+ hdrlen = l4_offset + l4_hdrlen;
segs = skb_shinfo(skb)->gso_segs;
mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 8c6954c58a88..635d33c0d6d3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -75,8 +75,10 @@ nfp_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
if (ret)
return ret;
- if (eth_port.port_lanes % count)
+ if (eth_port.port_lanes % count) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid count");
return -EINVAL;
+ }
/* Special case the 100G CXP -> 2x40G split */
lanes = eth_port.port_lanes / count;
@@ -101,8 +103,10 @@ nfp_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
if (ret)
return ret;
- if (!eth_port.is_split)
+ if (!eth_port.is_split) {
+ NL_SET_ERR_MSG_MOD(extack, "port is not split");
return -EINVAL;
+ }
/* Special case the 100G CXP -> 2x40G unsplit */
lanes = eth_port.port_lanes;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 939cfce15830..46764aeccb37 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -621,6 +621,9 @@ struct nfp_net_dp {
* @mbox_amsg.lock: Protect message list
* @mbox_amsg.list: List of message to process
* @mbox_amsg.work: Work to process message asynchronously
+ * @fs: Flow steering
+ * @fs.count: Flow count
+ * @fs.list: List of flows
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -728,9 +731,39 @@ struct nfp_net {
struct work_struct work;
} mbox_amsg;
+ struct {
+ u16 count;
+ struct list_head list;
+ } fs;
+
void *app_priv;
};
+struct nfp_fs_entry {
+ struct list_head node;
+ u32 flow_type;
+ u32 loc;
+ struct {
+ union {
+ struct {
+ __be32 sip4;
+ __be32 dip4;
+ };
+ struct {
+ __be32 sip6[4];
+ __be32 dip6[4];
+ };
+ };
+ union {
+ __be16 l3_proto;
+ u8 l4_proto;
+ };
+ __be16 sport;
+ __be16 dport;
+ } key, msk;
+ u64 action;
+};
+
struct nfp_mbox_amsg_entry {
struct list_head list;
int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
@@ -933,9 +966,9 @@ static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
netdev->netdev_ops == &nfp_nfdk_netdev_ops;
}
-static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
+static inline int nfp_net_coalesce_para_check(u32 param)
{
- if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1)))
+ if (param >= ((1 << 16) - 1))
return -EINVAL;
return 0;
@@ -987,6 +1020,9 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
struct netlink_ext_ack *extack);
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry);
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry);
+
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index de0a5d5ded30..f28e769e6fda 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1176,7 +1176,8 @@ static void nfp_net_rx_dim_work(struct work_struct *work)
* count.
*/
factor = nn->tlv_caps.me_freq_mhz / 16;
- if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
+ if (nfp_net_coalesce_para_check(factor * moder.usec) ||
+ nfp_net_coalesce_para_check(moder.pkts))
return;
/* copy RX interrupt coalesce parameters */
@@ -1205,7 +1206,8 @@ static void nfp_net_tx_dim_work(struct work_struct *work)
* count.
*/
factor = nn->tlv_caps.me_freq_mhz / 16;
- if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
+ if (nfp_net_coalesce_para_check(factor * moder.usec) ||
+ nfp_net_coalesce_para_check(moder.pkts))
return;
/* copy TX interrupt coalesce parameters */
@@ -1763,6 +1765,186 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
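+/* Serialise an IPv4 flow-steering rule into the mailbox area: the opcode is
+ * written first, then the rule laid out in the union below, one 32-bit word
+ * at a time.
+ */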
+static void
+nfp_net_fs_fill_v4(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+ unsigned int i;
+
+ union {
+ struct {
+ __be16 loc;
+ u8 k_proto, m_proto;
+ __be32 k_sip, m_sip, k_dip, m_dip;
+ __be16 k_sport, m_sport, k_dport, m_dport;
+ };
+ __be32 val[7];
+ } v4_rule;
+
+ nn_writel(nn, *addr, op);
+ *addr += sizeof(u32);
+
+ v4_rule.loc = cpu_to_be16(entry->loc);
+ v4_rule.k_proto = entry->key.l4_proto;
+ v4_rule.m_proto = entry->msk.l4_proto;
+ v4_rule.k_sip = entry->key.sip4;
+ v4_rule.m_sip = entry->msk.sip4;
+ v4_rule.k_dip = entry->key.dip4;
+ v4_rule.m_dip = entry->msk.dip4;
+ v4_rule.k_sport = entry->key.sport;
+ v4_rule.m_sport = entry->msk.sport;
+ v4_rule.k_dport = entry->key.dport;
+ v4_rule.m_dport = entry->msk.dport;
+
+ for (i = 0; i < ARRAY_SIZE(v4_rule.val); i++, *addr += sizeof(__be32))
+ nn_writel(nn, *addr, be32_to_cpu(v4_rule.val[i]));
+}
+
+static void
+nfp_net_fs_fill_v6(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+ unsigned int i;
+
+ union {
+ struct {
+ __be16 loc;
+ u8 k_proto, m_proto;
+ __be32 k_sip[4], m_sip[4], k_dip[4], m_dip[4];
+ __be16 k_sport, m_sport, k_dport, m_dport;
+ };
+ __be32 val[19];
+ } v6_rule;
+
+ nn_writel(nn, *addr, op);
+ *addr += sizeof(u32);
+
+ v6_rule.loc = cpu_to_be16(entry->loc);
+ v6_rule.k_proto = entry->key.l4_proto;
+ v6_rule.m_proto = entry->msk.l4_proto;
+ for (i = 0; i < 4; i++) {
+ v6_rule.k_sip[i] = entry->key.sip6[i];
+ v6_rule.m_sip[i] = entry->msk.sip6[i];
+ v6_rule.k_dip[i] = entry->key.dip6[i];
+ v6_rule.m_dip[i] = entry->msk.dip6[i];
+ }
+ v6_rule.k_sport = entry->key.sport;
+ v6_rule.m_sport = entry->msk.sport;
+ v6_rule.k_dport = entry->key.dport;
+ v6_rule.m_dport = entry->msk.dport;
+
+ for (i = 0; i < ARRAY_SIZE(v6_rule.val); i++, *addr += sizeof(__be32))
+ nn_writel(nn, *addr, be32_to_cpu(v6_rule.val[i]));
+}
+
+#define NFP_FS_QUEUE_ID GENMASK(22, 16)
+#define NFP_FS_ACT GENMASK(15, 0)
+#define NFP_FS_ACT_DROP BIT(0)
+#define NFP_FS_ACT_Q BIT(1)
+static void
+nfp_net_fs_fill_act(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 addr)
+{
+ u32 action = 0; /* 0 means default passthrough */
+
+ if (entry->action == RX_CLS_FLOW_DISC)
+ action = NFP_FS_ACT_DROP;
+ else if (!(entry->flow_type & FLOW_RSS))
+ action = FIELD_PREP(NFP_FS_QUEUE_ID, entry->action) | NFP_FS_ACT_Q;
+
+ nn_writel(nn, addr, action);
+}
+
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+ u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+ if (err)
+ return err;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_USER_FLOW:
+ nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V4, &addr);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V6, &addr);
+ break;
+ case ETHER_FLOW:
+ nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE);
+ addr += sizeof(u32);
+ nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+ addr += sizeof(u32);
+ break;
+ }
+
+ nfp_net_fs_fill_act(nn, entry, addr);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+ if (err) {
+ nn_err(nn, "Add new fs rule failed with %d\n", err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+ u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+ if (err)
+ return err;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_USER_FLOW:
+ nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V4, &addr);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V6, &addr);
+ break;
+ case ETHER_FLOW:
+ nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE);
+ addr += sizeof(u32);
+ nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+ addr += sizeof(u32);
+ break;
+ }
+
+ nfp_net_fs_fill_act(nn, entry, addr);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+ if (err) {
+ nn_err(nn, "Delete fs rule failed with %d\n", err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void nfp_net_fs_clean(struct nfp_net *nn)
+{
+ struct nfp_fs_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &nn->fs.list, node) {
+ nfp_net_fs_del_hw(nn, entry);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -1934,7 +2116,10 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb_is_gso(skb)) {
u32 hdrlen;
- hdrlen = skb_inner_tcp_all_headers(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ hdrlen = skb_inner_transport_offset(skb) + sizeof(struct udphdr);
+ else
+ hdrlen = skb_inner_tcp_all_headers(skb);
/* Assume worst case scenario of having longest possible
* metadata prepend - 8B
@@ -2237,7 +2422,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2266,6 +2451,7 @@ void nfp_net_info(struct nfp_net *nn)
"RXCSUM_COMPLETE " : "",
nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER ? "MULTICAST_FILTER " : "",
+ nn->cap_w1 & NFP_NET_CFG_CTRL_USO ? "USO " : "",
nfp_app_extra_cap(nn->app, nn));
}
@@ -2514,6 +2700,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
nn->cap & NFP_NET_CFG_CTRL_LSO2) {
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_USO)
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
NFP_NET_CFG_CTRL_LSO;
}
@@ -2588,6 +2776,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
case NFP_NFD_VER_NFD3:
netdev->netdev_ops = &nfp_nfd3_netdev_ops;
netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
break;
case NFP_NFD_VER_NFDK:
netdev->netdev_ops = &nfp_nfdk_netdev_ops;
@@ -2740,6 +2929,8 @@ int nfp_net_init(struct nfp_net *nn)
INIT_LIST_HEAD(&nn->mbox_amsg.list);
INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
+ INIT_LIST_HEAD(&nn->fs.list);
+
return register_netdev(nn->dp.netdev);
err_clean_mbox:
@@ -2759,6 +2950,7 @@ void nfp_net_clean(struct nfp_net *nn)
unregister_netdev(nn->dp.netdev);
nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
+ nfp_net_fs_clean(nn);
flush_work(&nn->mbox_amsg.work);
nfp_net_reconfig_wait_posted(nn);
}
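The action word programmed by nfp_net_fs_fill_act() above is a single 32-bit value: bit 0 requests a drop, bit 1 requests steering to a fixed queue, and the destination queue index is packed into a separate NFP_FS_QUEUE_ID field defined elsewhere in the driver. A minimal sketch of that encoding (the NFP_FS_QUEUE_ID layout here is an assumption for illustration, not the driver's actual definition):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define NFP_FS_ACT_DROP   BIT(0)
#define NFP_FS_ACT_Q      BIT(1)
/* Assumed layout of the queue index field; the real definition lives
 * next to NFP_FS_ACT in the driver.
 */
#define NFP_FS_QUEUE_ID   GENMASK(31, 16)

static u32 nfp_fs_encode_action(bool drop, bool rss, u16 queue)
{
	u32 action = 0;	/* 0 means default passthrough (RSS picks the queue) */

	if (drop)
		action = NFP_FS_ACT_DROP;
	else if (!rss)
		action = FIELD_PREP(NFP_FS_QUEUE_ID, queue) | NFP_FS_ACT_Q;

	return action;
}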
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 3e63f6d6a563..634c63c7f7eb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -269,6 +269,8 @@
#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */
#define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */
#define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */
+#define NFP_NET_CFG_CTRL_FLOW_STEER (0x1 << 8) /* Flow steering */
+#define NFP_NET_CFG_CTRL_USO (0x1 << 16) /* UDP segmentation offload */
#define NFP_NET_CFG_CAP_WORD1 0x00a4
@@ -418,6 +420,8 @@
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9
+#define NFP_NET_CFG_MBOX_CMD_FLOW_STEER 10
+
/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
@@ -440,6 +444,18 @@
#define NFP_NET_CFG_MULTICAST_MAC_LO (NFP_NET_CFG_MULTICAST + 6)
#define NFP_NET_CFG_MULTICAST_SZ 0x0006
+/* Max size of FS rules in bytes */
+#define NFP_NET_CFG_FS_SZ 0x0054
+/* Sub commands for FS */
+enum {
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_V4,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_V4,
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_V6,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_V6,
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE,
+};
+
/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e75cbb287625..fbca8d0efd85 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -633,7 +633,8 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
ring->tx_pending = nn->dp.txd_cnt;
}
-static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
+static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt,
+ struct netlink_ext_ack *extack)
{
struct nfp_net_dp *dp;
@@ -644,7 +645,7 @@ static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
dp->rxd_cnt = rxd_cnt;
dp->txd_cnt = txd_cnt;
- return nfp_net_ring_reconfig(nn, dp, NULL);
+ return nfp_net_ring_reconfig(nn, dp, extack);
}
static int nfp_net_set_ringparam(struct net_device *netdev,
@@ -657,7 +658,7 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
/* We don't have separate queues/rings for small/large frames. */
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
- return -EINVAL;
+ return -EOPNOTSUPP;
qc_min = nn->dev_info->min_qc_size;
qc_max = nn->dev_info->max_qc_size;
@@ -666,9 +667,15 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
rxd_cnt = roundup_pow_of_two(ring->rx_pending);
txd_cnt = roundup_pow_of_two(ring->tx_pending);
- if (rxd_cnt < qc_min || rxd_cnt > qc_max ||
- txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp)
+ if (rxd_cnt < qc_min || rxd_cnt > qc_max) {
+ NL_SET_ERR_MSG_MOD(extack, "rx parameter out of bounds");
return -EINVAL;
+ }
+
+ if (txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp) {
+ NL_SET_ERR_MSG_MOD(extack, "tx parameter out of bounds");
+ return -EINVAL;
+ }
if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
return 0;
@@ -676,7 +683,7 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
- return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
+ return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt, extack);
}
static int nfp_test_link(struct net_device *netdev)
@@ -800,7 +807,7 @@ static void nfp_get_self_test_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
if (nfp_self_test[i].is_supported(netdev))
- ethtool_sprintf(&data, nfp_self_test[i].name);
+ ethtool_puts(&data, nfp_self_test[i].name);
}
static int nfp_get_self_test_count(struct net_device *netdev)
@@ -852,24 +859,24 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
ethtool_sprintf(&data, "rvec_%u_tx_busy", i);
}
- ethtool_sprintf(&data, "hw_rx_csum_ok");
- ethtool_sprintf(&data, "hw_rx_csum_inner_ok");
- ethtool_sprintf(&data, "hw_rx_csum_complete");
- ethtool_sprintf(&data, "hw_rx_csum_err");
- ethtool_sprintf(&data, "rx_replace_buf_alloc_fail");
- ethtool_sprintf(&data, "rx_tls_decrypted_packets");
- ethtool_sprintf(&data, "hw_tx_csum");
- ethtool_sprintf(&data, "hw_tx_inner_csum");
- ethtool_sprintf(&data, "tx_gather");
- ethtool_sprintf(&data, "tx_lso");
- ethtool_sprintf(&data, "tx_tls_encrypted_packets");
- ethtool_sprintf(&data, "tx_tls_ooo");
- ethtool_sprintf(&data, "tx_tls_drop_no_sync_data");
-
- ethtool_sprintf(&data, "hw_tls_no_space");
- ethtool_sprintf(&data, "rx_tls_resync_req_ok");
- ethtool_sprintf(&data, "rx_tls_resync_req_ign");
- ethtool_sprintf(&data, "rx_tls_resync_sent");
+ ethtool_puts(&data, "hw_rx_csum_ok");
+ ethtool_puts(&data, "hw_rx_csum_inner_ok");
+ ethtool_puts(&data, "hw_rx_csum_complete");
+ ethtool_puts(&data, "hw_rx_csum_err");
+ ethtool_puts(&data, "rx_replace_buf_alloc_fail");
+ ethtool_puts(&data, "rx_tls_decrypted_packets");
+ ethtool_puts(&data, "hw_tx_csum");
+ ethtool_puts(&data, "hw_tx_inner_csum");
+ ethtool_puts(&data, "tx_gather");
+ ethtool_puts(&data, "tx_lso");
+ ethtool_puts(&data, "tx_tls_encrypted_packets");
+ ethtool_puts(&data, "tx_tls_ooo");
+ ethtool_puts(&data, "tx_tls_drop_no_sync_data");
+
+ ethtool_puts(&data, "hw_tls_no_space");
+ ethtool_puts(&data, "rx_tls_resync_req_ok");
+ ethtool_puts(&data, "rx_tls_resync_req_ign");
+ ethtool_puts(&data, "rx_tls_resync_sent");
return data;
}
@@ -943,13 +950,13 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
swap_off = repr * NN_ET_SWITCH_STATS_LEN;
for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
- ethtool_sprintf(&data, nfp_net_et_stats[i + swap_off].name);
+ ethtool_puts(&data, nfp_net_et_stats[i + swap_off].name);
for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
- ethtool_sprintf(&data, nfp_net_et_stats[i - swap_off].name);
+ ethtool_puts(&data, nfp_net_et_stats[i - swap_off].name);
for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&data, nfp_net_et_stats[i].name);
+ ethtool_puts(&data, nfp_net_et_stats[i].name);
for (i = 0; i < num_vecs; i++) {
ethtool_sprintf(&data, "rxq_%u_pkts", i);
@@ -1317,6 +1324,116 @@ static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
return 0;
}
+#define NFP_FS_MAX_ENTRY 1024
+
+static int nfp_net_fs_to_ethtool(struct nfp_fs_entry *entry, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ unsigned int i;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fs->h_u.tcp_ip4_spec.ip4src = entry->key.sip4;
+ fs->h_u.tcp_ip4_spec.ip4dst = entry->key.dip4;
+ fs->h_u.tcp_ip4_spec.psrc = entry->key.sport;
+ fs->h_u.tcp_ip4_spec.pdst = entry->key.dport;
+ fs->m_u.tcp_ip4_spec.ip4src = entry->msk.sip4;
+ fs->m_u.tcp_ip4_spec.ip4dst = entry->msk.dip4;
+ fs->m_u.tcp_ip4_spec.psrc = entry->msk.sport;
+ fs->m_u.tcp_ip4_spec.pdst = entry->msk.dport;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ for (i = 0; i < 4; i++) {
+ fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
+ fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
+ fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
+ fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
+ }
+ fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
+ fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
+ fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
+ fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
+ break;
+ case IPV4_USER_FLOW:
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.ip4src = entry->key.sip4;
+ fs->h_u.usr_ip4_spec.ip4dst = entry->key.dip4;
+ fs->h_u.usr_ip4_spec.proto = entry->key.l4_proto;
+ fs->m_u.usr_ip4_spec.ip4src = entry->msk.sip4;
+ fs->m_u.usr_ip4_spec.ip4dst = entry->msk.dip4;
+ fs->m_u.usr_ip4_spec.proto = entry->msk.l4_proto;
+ break;
+ case IPV6_USER_FLOW:
+ for (i = 0; i < 4; i++) {
+ fs->h_u.usr_ip6_spec.ip6src[i] = entry->key.sip6[i];
+ fs->h_u.usr_ip6_spec.ip6dst[i] = entry->key.dip6[i];
+ fs->m_u.usr_ip6_spec.ip6src[i] = entry->msk.sip6[i];
+ fs->m_u.usr_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
+ }
+ fs->h_u.usr_ip6_spec.l4_proto = entry->key.l4_proto;
+ fs->m_u.usr_ip6_spec.l4_proto = entry->msk.l4_proto;
+ break;
+ case ETHER_FLOW:
+ fs->h_u.ether_spec.h_proto = entry->key.l3_proto;
+ fs->m_u.ether_spec.h_proto = entry->msk.l3_proto;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fs->flow_type = entry->flow_type;
+ fs->ring_cookie = entry->action;
+
+ if (fs->flow_type & FLOW_RSS) {
+ /* Only rss_context of 0 is supported. */
+ cmd->rss_context = 0;
+ /* RSS is used, mask the ring. */
+ fs->ring_cookie |= ETHTOOL_RX_FLOW_SPEC_RING;
+ }
+
+ return 0;
+}
+
+static int nfp_net_get_fs_rule(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct nfp_fs_entry *entry;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ if (cmd->fs.location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == cmd->fs.location)
+ return nfp_net_fs_to_ethtool(entry, cmd);
+
+ if (entry->loc > cmd->fs.location)
+ /* no need to continue */
+ return -ENOENT;
+ }
+
+ return -ENOENT;
+}
+
+static int nfp_net_get_fs_loc(struct nfp_net *nn, u32 *rule_locs)
+{
+ struct nfp_fs_entry *entry;
+ u32 count = 0;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(entry, &nn->fs.list, node)
+ rule_locs[count++] = entry->loc;
+
+ return 0;
+}
+
static int nfp_net_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
@@ -1326,6 +1443,14 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nn->dp.num_rx_rings;
return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = nn->fs.count;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return nfp_net_get_fs_rule(nn, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = NFP_FS_MAX_ENTRY;
+ return nfp_net_get_fs_loc(nn, rule_locs);
case ETHTOOL_GRXFH:
return nfp_net_get_rss_hash_opts(nn, cmd);
default:
@@ -1385,6 +1510,253 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
return 0;
}
+static int nfp_net_fs_from_ethtool(struct nfp_fs_entry *entry, struct ethtool_rx_flow_spec *fs)
+{
+ unsigned int i;
+
+ /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ entry->msk.sip4 = fs->m_u.tcp_ip4_spec.ip4src;
+ entry->msk.dip4 = fs->m_u.tcp_ip4_spec.ip4dst;
+ entry->msk.sport = fs->m_u.tcp_ip4_spec.psrc;
+ entry->msk.dport = fs->m_u.tcp_ip4_spec.pdst;
+ entry->key.sip4 = fs->h_u.tcp_ip4_spec.ip4src & entry->msk.sip4;
+ entry->key.dip4 = fs->h_u.tcp_ip4_spec.ip4dst & entry->msk.dip4;
+ entry->key.sport = fs->h_u.tcp_ip4_spec.psrc & entry->msk.sport;
+ entry->key.dport = fs->h_u.tcp_ip4_spec.pdst & entry->msk.dport;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ for (i = 0; i < 4; i++) {
+ entry->msk.sip6[i] = fs->m_u.tcp_ip6_spec.ip6src[i];
+ entry->msk.dip6[i] = fs->m_u.tcp_ip6_spec.ip6dst[i];
+ entry->key.sip6[i] = fs->h_u.tcp_ip6_spec.ip6src[i] & entry->msk.sip6[i];
+ entry->key.dip6[i] = fs->h_u.tcp_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
+ }
+ entry->msk.sport = fs->m_u.tcp_ip6_spec.psrc;
+ entry->msk.dport = fs->m_u.tcp_ip6_spec.pdst;
+ entry->key.sport = fs->h_u.tcp_ip6_spec.psrc & entry->msk.sport;
+ entry->key.dport = fs->h_u.tcp_ip6_spec.pdst & entry->msk.dport;
+ break;
+ case IPV4_USER_FLOW:
+ entry->msk.sip4 = fs->m_u.usr_ip4_spec.ip4src;
+ entry->msk.dip4 = fs->m_u.usr_ip4_spec.ip4dst;
+ entry->msk.l4_proto = fs->m_u.usr_ip4_spec.proto;
+ entry->key.sip4 = fs->h_u.usr_ip4_spec.ip4src & entry->msk.sip4;
+ entry->key.dip4 = fs->h_u.usr_ip4_spec.ip4dst & entry->msk.dip4;
+ entry->key.l4_proto = fs->h_u.usr_ip4_spec.proto & entry->msk.l4_proto;
+ break;
+ case IPV6_USER_FLOW:
+ for (i = 0; i < 4; i++) {
+ entry->msk.sip6[i] = fs->m_u.usr_ip6_spec.ip6src[i];
+ entry->msk.dip6[i] = fs->m_u.usr_ip6_spec.ip6dst[i];
+ entry->key.sip6[i] = fs->h_u.usr_ip6_spec.ip6src[i] & entry->msk.sip6[i];
+ entry->key.dip6[i] = fs->h_u.usr_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
+ }
+ entry->msk.l4_proto = fs->m_u.usr_ip6_spec.l4_proto;
+ entry->key.l4_proto = fs->h_u.usr_ip6_spec.l4_proto & entry->msk.l4_proto;
+ break;
+ case ETHER_FLOW:
+ entry->msk.l3_proto = fs->m_u.ether_spec.h_proto;
+ entry->key.l3_proto = fs->h_u.ether_spec.h_proto & entry->msk.l3_proto;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_TCP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_UDP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_SCTP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ }
+
+ entry->flow_type = fs->flow_type;
+ entry->action = fs->ring_cookie;
+ entry->loc = fs->location;
+
+ return 0;
+}
+
+static int nfp_net_fs_check_existing(struct nfp_net *nn, struct nfp_fs_entry *new)
+{
+ struct nfp_fs_entry *entry;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (new->loc != entry->loc &&
+ !((new->flow_type ^ entry->flow_type) & ~FLOW_RSS) &&
+ !memcmp(&new->key, &entry->key, sizeof(new->key)) &&
+ !memcmp(&new->msk, &entry->msk, sizeof(new->msk)))
+ return entry->loc;
+ }
+
+ /* -1 means no duplicates */
+ return -1;
+}
+
+static int nfp_net_fs_add(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct nfp_fs_entry *new, *entry;
+ bool unsupp_mask;
+ int err, id;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ /* Only the default RSS context (0) is supported. */
+ if ((fs->flow_type & FLOW_RSS) && cmd->rss_context)
+ return -EOPNOTSUPP;
+
+ if (fs->location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= nn->dp.num_rx_rings)
+ return -EINVAL;
+
+ /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ unsupp_mask = !!fs->m_u.tcp_ip4_spec.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ unsupp_mask = !!fs->m_u.tcp_ip6_spec.tclass;
+ break;
+ case IPV4_USER_FLOW:
+ unsupp_mask = !!fs->m_u.usr_ip4_spec.l4_4_bytes ||
+ !!fs->m_u.usr_ip4_spec.tos ||
+ !!fs->m_u.usr_ip4_spec.ip_ver;
+ /* ip_ver must be ETH_RX_NFC_IP4. */
+ unsupp_mask |= fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4;
+ break;
+ case IPV6_USER_FLOW:
+ unsupp_mask = !!fs->m_u.usr_ip6_spec.l4_4_bytes ||
+ !!fs->m_u.usr_ip6_spec.tclass;
+ break;
+ case ETHER_FLOW:
+ if (fs->h_u.ether_spec.h_proto == htons(ETH_P_IP) ||
+ fs->h_u.ether_spec.h_proto == htons(ETH_P_IPV6)) {
+ nn_err(nn, "Please use ip4/ip6 flow type instead.\n");
+ return -EOPNOTSUPP;
+ }
+ /* Only an exact-match (fully masked) ethtype is supported. */
+ unsupp_mask = !is_zero_ether_addr(fs->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fs->m_u.ether_spec.h_source) ||
+ (fs->m_u.ether_spec.h_proto != htons(0xffff));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (unsupp_mask)
+ return -EOPNOTSUPP;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ nfp_net_fs_from_ethtool(new, fs);
+
+ id = nfp_net_fs_check_existing(nn, new);
+ if (id >= 0) {
+ nn_err(nn, "Identical rule is existing in %d.\n", id);
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* Insert into the list in ascending order of location. */
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == fs->location) {
+ err = nfp_net_fs_del_hw(nn, entry);
+ if (err)
+ goto err;
+
+ nn->fs.count--;
+ err = nfp_net_fs_add_hw(nn, new);
+ if (err)
+ goto err;
+
+ nn->fs.count++;
+ list_replace(&entry->node, &new->node);
+ kfree(entry);
+
+ return 0;
+ }
+
+ if (entry->loc > fs->location)
+ break;
+ }
+
+ if (nn->fs.count == NFP_FS_MAX_ENTRY) {
+ err = -ENOSPC;
+ goto err;
+ }
+
+ err = nfp_net_fs_add_hw(nn, new);
+ if (err)
+ goto err;
+
+ list_add_tail(&new->node, &entry->node);
+ nn->fs.count++;
+
+ return 0;
+
+err:
+ kfree(new);
+ return err;
+}
+
+static int nfp_net_fs_del(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct nfp_fs_entry *entry;
+ int err;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ if (!nn->fs.count || cmd->fs.location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == cmd->fs.location) {
+ err = nfp_net_fs_del_hw(nn, entry);
+ if (err)
+ return err;
+
+ list_del(&entry->node);
+ kfree(entry);
+ nn->fs.count--;
+
+ return 0;
+ } else if (entry->loc > cmd->fs.location) {
+ /* no need to continue */
+ break;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int nfp_net_set_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd)
{
@@ -1393,6 +1765,10 @@ static int nfp_net_set_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
return nfp_net_set_rss_hash_opt(nn, cmd);
+ case ETHTOOL_SRXCLSRLINS:
+ return nfp_net_fs_add(nn, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return nfp_net_fs_del(nn, cmd);
default:
return -EOPNOTSUPP;
}
@@ -1418,8 +1794,8 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
return nfp_net_rss_key_sz(nn);
}
-static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int nfp_net_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct nfp_net *nn = netdev_priv(netdev);
int i;
@@ -1427,41 +1803,41 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
- if (indir)
+ if (rxfh->indir)
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
- indir[i] = nn->rss_itbl[i];
- if (key)
- memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
- if (hfunc) {
- *hfunc = nn->rss_hfunc;
- if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- }
+ rxfh->indir[i] = nn->rss_itbl[i];
+ if (rxfh->key)
+ memcpy(rxfh->key, nn->rss_key, nfp_net_rss_key_sz(nn));
+
+ rxfh->hfunc = nn->rss_hfunc;
+ if (rxfh->hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
+ rxfh->hfunc = ETH_RSS_HASH_UNKNOWN;
return 0;
}
static int nfp_net_set_rxfh(struct net_device *netdev,
- const u32 *indir, const u8 *key,
- const u8 hfunc)
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
int i;
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
- !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
+ !(rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE ||
+ rxfh->hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
- if (!key && !indir)
+ if (!rxfh->key && !rxfh->indir)
return 0;
- if (key) {
- memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
+ if (rxfh->key) {
+ memcpy(nn->rss_key, rxfh->key, nfp_net_rss_key_sz(nn));
nfp_net_rss_write_key(nn);
}
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
- nn->rss_itbl[i] = indir[i];
+ nn->rss_itbl[i] = rxfh->indir[i];
nfp_net_rss_write_itbl(nn);
}
@@ -1497,7 +1873,7 @@ static int nfp_net_get_coalesce(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
- return -EINVAL;
+ return -EOPNOTSUPP;
ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on;
ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on;
@@ -1776,22 +2152,40 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
*/
if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* ensure valid configuration */
- if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
+ if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rx-usecs and rx-frames cannot both be zero");
return -EINVAL;
+ }
- if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
+ if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "tx-usecs and tx-frames cannot both be zero");
return -EINVAL;
+ }
- if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor,
- ec->rx_max_coalesced_frames))
+ if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx-usecs too large");
return -EINVAL;
+ }
- if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor,
- ec->tx_max_coalesced_frames))
+ if (nfp_net_coalesce_para_check(ec->rx_max_coalesced_frames)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx-frames too large");
return -EINVAL;
+ }
+
+ if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx-usecs too large");
+ return -EINVAL;
+ }
+
+ if (nfp_net_coalesce_para_check(ec->tx_max_coalesced_frames)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx-frames too large");
+ return -EINVAL;
+ }
/* configuration is valid */
nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce;
@@ -1866,6 +2260,30 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static int nfp_port_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg != AUTONEG_DISABLE)
+ return -EOPNOTSUPP;
+
+ err = nfp_eth_set_pauseparam(port->app->cpp, eth_port->index,
+ pause->tx_pause, pause->rx_pause);
+ if (!err)
+ /* Only refresh if we did something */
+ nfp_net_refresh_port_table(port);
+
+ return err < 0 ? err : 0;
+}
+
static void nfp_port_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -1877,10 +2295,10 @@ static void nfp_port_get_pauseparam(struct net_device *netdev,
if (!eth_port)
return;
- /* Currently pause frame support is fixed */
+ /* Currently pause frame autoneg is fixed */
pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = 1;
- pause->tx_pause = 1;
+ pause->rx_pause = eth_port->rx_pause;
+ pause->tx_pause = eth_port->tx_pause;
}
static int nfp_net_set_phys_id(struct net_device *netdev,
@@ -2106,8 +2524,10 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .set_pauseparam = nfp_port_set_pauseparam,
.get_pauseparam = nfp_port_get_pauseparam,
.set_phys_id = nfp_net_set_phys_id,
+ .get_ts_info = ethtool_op_get_ts_info,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
@@ -2130,6 +2550,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .set_pauseparam = nfp_port_set_pauseparam,
.get_pauseparam = nfp_port_get_pauseparam,
.set_phys_id = nfp_net_set_phys_id,
};
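The ETHTOOL_SRXCLSRLINS/ETHTOOL_SRXCLSRLDEL cases added to nfp_net_set_rxnfc() are reached through the standard ethtool rxnfc ioctl. A hypothetical userspace sketch of a request that would land in nfp_net_fs_add() — a TCPv4 rule dropping traffic to destination port 80 — with the SIOCETHTOOL plumbing and error handling omitted:

#include <linux/ethtool.h>
#include <arpa/inet.h>
#include <string.h>

/* Fill an ETHTOOL_SRXCLSRLINS request for a TCPv4 drop rule.
 * location must be below the driver's NFP_FS_MAX_ENTRY (1024).
 */
static void fill_tcp4_drop_rule(struct ethtool_rxnfc *cmd, __u32 location)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd = ETHTOOL_SRXCLSRLINS;
	cmd->fs.flow_type = TCP_V4_FLOW;
	cmd->fs.location = location;
	cmd->fs.ring_cookie = RX_CLS_FLOW_DISC;		/* drop the flow */
	cmd->fs.h_u.tcp_ip4_spec.pdst = htons(80);
	cmd->fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* exact match on dport */
}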
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 33b4c2856316..3f10c5365c80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
const u32 barcfg_msix_general =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
const u32 barcfg_msix_xpb =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
NFP_CPP_TARGET_ISLAND_XPB);
const u32 barcfg_explicit[4] = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 00264af13b49..dc0e405c1349 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -189,6 +189,8 @@ enum nfp_ethtool_link_mode_list {
* @ports.enabled: is enabled?
* @ports.tx_enabled: is TX enabled?
* @ports.rx_enabled: is RX enabled?
+ * @ports.rx_pause: is RX pause frame reception enabled?
+ * @ports.tx_pause: is TX pause frame transmission enabled?
* @ports.override_changed: is media reconfig pending?
*
* @ports.port_type: one of %PORT_* defines for ethtool
@@ -227,6 +229,8 @@ struct nfp_eth_table {
bool tx_enabled;
bool rx_enabled;
bool supp_aneg;
+ bool rx_pause;
+ bool tx_pause;
bool override_changed;
@@ -255,6 +259,8 @@ int
nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state);
+int nfp_eth_set_pauseparam(struct nfp_cpp *cpp, unsigned int idx,
+ unsigned int tx_pause, unsigned int rx_pause);
static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 9d62085d772a..5cfddc9a5d87 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -42,6 +42,8 @@
#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
#define NSP_ETH_STATE_ACT_FEC GENMASK_ULL(29, 28)
+#define NSP_ETH_STATE_TX_PAUSE BIT_ULL(31)
+#define NSP_ETH_STATE_RX_PAUSE BIT_ULL(32)
#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
@@ -52,6 +54,8 @@
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
#define NSP_ETH_CTRL_SET_IDMODE BIT_ULL(8)
+#define NSP_ETH_CTRL_SET_TX_PAUSE BIT_ULL(10)
+#define NSP_ETH_CTRL_SET_RX_PAUSE BIT_ULL(11)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -180,6 +184,15 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state);
dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 37) {
+ dst->tx_pause = true;
+ dst->rx_pause = true;
+ return;
+ }
+
+ dst->tx_pause = FIELD_GET(NSP_ETH_STATE_TX_PAUSE, state);
+ dst->rx_pause = FIELD_GET(NSP_ETH_STATE_RX_PAUSE, state);
}
static void
@@ -497,7 +510,7 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed)
static int
nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
const u64 mask, const unsigned int shift,
- unsigned int val, const u64 ctrl_bit)
+ u64 val, const u64 ctrl_bit)
{
union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
unsigned int idx = nfp_nsp_config_idx(nsp);
@@ -630,6 +643,81 @@ nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode)
}
/**
+ * __nfp_eth_set_txpause() - set tx pause control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @tx_pause: TX pause switch
+ *
+ * Set TX pause switch.
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int __nfp_eth_set_txpause(struct nfp_nsp *nsp, unsigned int tx_pause)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_TX_PAUSE,
+ tx_pause, NSP_ETH_CTRL_SET_TX_PAUSE);
+}
+
+/**
+ * __nfp_eth_set_rxpause() - set rx pause control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @rx_pause: RX pause switch
+ *
+ * Set RX pause switch.
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int __nfp_eth_set_rxpause(struct nfp_nsp *nsp, unsigned int rx_pause)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_RX_PAUSE,
+ rx_pause, NSP_ETH_CTRL_SET_RX_PAUSE);
+}
+
+/**
+ * nfp_eth_set_pauseparam() - Set TX/RX pause switch.
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @tx_pause: TX pause switch
+ * @rx_pause: RX pause switch
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_pauseparam(struct nfp_cpp *cpp, unsigned int idx,
+ unsigned int tx_pause, unsigned int rx_pause)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 37) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set pause parameter operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ err = __nfp_eth_set_txpause(nsp, tx_pause);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ err = __nfp_eth_set_rxpause(nsp, rx_pause);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/**
* __nfp_eth_set_speed() - set interface speed/rate
* @nsp: NFP NSP handle returned from nfp_eth_config_start()
* @speed: Desired speed (per lane)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 2453a40f6ee8..9ffef2e06885 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -91,6 +91,4 @@ int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
-const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr);
-
#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index d6ce113a4210..6ba8d4aca0a0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic)
bars[i].len = 0;
}
}
+ ionic->num_bars = 0;
}
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
@@ -215,9 +216,18 @@ out:
static void ionic_clear_pci(struct ionic *ionic)
{
- ionic_unmap_bars(ionic);
- pci_release_regions(ionic->pdev);
- pci_disable_device(ionic->pdev);
+ if (ionic->num_bars) {
+ ionic->idev.dev_info_regs = NULL;
+ ionic->idev.dev_cmd_regs = NULL;
+ ionic->idev.intr_status = NULL;
+ ionic->idev.intr_ctrl = NULL;
+
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+ }
+
+ if (pci_is_enabled(ionic->pdev))
+ pci_disable_device(ionic->pdev);
}
static int ionic_setup_one(struct ionic *ionic)
@@ -389,9 +399,13 @@ static void ionic_remove(struct pci_dev *pdev)
{
struct ionic *ionic = pci_get_drvdata(pdev);
- del_timer_sync(&ionic->watchdog_timer);
+ timer_shutdown_sync(&ionic->watchdog_timer);
if (ionic->lif) {
+ /* prevent adminq cmds if FW is already known to be down */
+ if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
+ set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
+
ionic_lif_unregister(ionic->lif);
ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
@@ -416,6 +430,8 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
dev_dbg(ionic->dev, "%s: device stopping\n", __func__);
+ set_bit(IONIC_LIF_F_FW_RESET, lif->state);
+
del_timer_sync(&ionic->watchdog_timer);
cancel_work_sync(&lif->deferred.work);
@@ -424,6 +440,7 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
ionic_txrx_free(lif);
ionic_lif_deinit(lif);
ionic_qcqs_free(lif);
+ ionic_debugfs_del_lif(lif);
mutex_unlock(&lif->queue_lock);
ionic_dev_teardown(ionic);
@@ -455,10 +472,35 @@ err_out:
__func__, err ? "failed" : "done");
}
+static pci_ers_result_t ionic_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t error)
+{
+ if (error == pci_channel_io_frozen) {
+ ionic_reset_prepare(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void ionic_pci_error_resume(struct pci_dev *pdev)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct ionic_lif *lif = ionic->lif;
+
+ if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ pci_reset_function_locked(pdev);
+}
+
static const struct pci_error_handlers ionic_err_handler = {
/* FLR handling */
.reset_prepare = ionic_reset_prepare,
.reset_done = ionic_reset_done,
+
+ /* PCI bus error detected on this device */
+ .error_detected = ionic_pci_error_detected,
+ .resume = ionic_pci_error_resume,
+
};
static struct pci_driver ionic_driver = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index c58217027564..91327ef670c7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -287,6 +287,9 @@ void ionic_debugfs_add_lif(struct ionic_lif *lif)
void ionic_debugfs_del_lif(struct ionic_lif *lif)
{
+ if (!lif->dentry)
+ return;
+
debugfs_remove_recursive(lif->dentry);
lif->dentry = NULL;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index c06576f43916..746072b4dbd0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -165,9 +165,19 @@ void ionic_dev_teardown(struct ionic *ionic)
}
/* Devcmd Interface */
-bool ionic_is_fw_running(struct ionic_dev *idev)
+static bool __ionic_is_fw_running(struct ionic_dev *idev, u8 *status_ptr)
{
- u8 fw_status = ioread8(&idev->dev_info_regs->fw_status);
+ u8 fw_status;
+
+ if (!idev->dev_info_regs) {
+ if (status_ptr)
+ *status_ptr = 0xff;
+ return false;
+ }
+
+ fw_status = ioread8(&idev->dev_info_regs->fw_status);
+ if (status_ptr)
+ *status_ptr = fw_status;
/* firmware is useful only if the running bit is set and
* fw_status != 0xff (bad PCI read)
@@ -175,6 +185,11 @@ bool ionic_is_fw_running(struct ionic_dev *idev)
return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
}
+bool ionic_is_fw_running(struct ionic_dev *idev)
+{
+ return __ionic_is_fw_running(idev, NULL);
+}
+
int ionic_heartbeat_check(struct ionic *ionic)
{
unsigned long check_time, last_check_time;
@@ -199,10 +214,8 @@ do_check_time:
goto do_check_time;
}
- fw_status = ioread8(&idev->dev_info_regs->fw_status);
-
/* If fw_status is not ready don't bother with the generation */
- if (!ionic_is_fw_running(idev)) {
+ if (!__ionic_is_fw_running(idev, &fw_status)) {
fw_status_ready = false;
} else {
fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
@@ -306,21 +319,32 @@ do_check_time:
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return (u8)PCI_ERROR_RESPONSE;
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}
bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return false;
return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
+ if (!idev->dev_cmd_regs)
+ return;
memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
+ idev->opcode = cmd->cmd.opcode;
+
+ if (!idev->dev_cmd_regs)
+ return;
+
memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
iowrite32(0, &idev->dev_cmd_regs->done);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
@@ -469,46 +493,6 @@ int ionic_set_vf_config(struct ionic *ionic, int vf,
return err;
}
-int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
- struct ionic_vf_getattr_comp *comp)
-{
- union ionic_dev_cmd cmd = {
- .vf_getattr.opcode = IONIC_CMD_VF_GETATTR,
- .vf_getattr.attr = attr,
- .vf_getattr.vf_index = cpu_to_le16(vf),
- };
- int err;
-
- if (vf >= ionic->num_vfs)
- return -EINVAL;
-
- switch (attr) {
- case IONIC_VF_ATTR_SPOOFCHK:
- case IONIC_VF_ATTR_TRUST:
- case IONIC_VF_ATTR_LINKSTATE:
- case IONIC_VF_ATTR_MAC:
- case IONIC_VF_ATTR_VLAN:
- case IONIC_VF_ATTR_RATE:
- break;
- case IONIC_VF_ATTR_STATSADDR:
- default:
- return -EINVAL;
- }
-
- mutex_lock(&ionic->dev_cmd_lock);
- ionic_dev_cmd_go(&ionic->idev, &cmd);
- err = ionic_dev_cmd_wait_nomsg(ionic, DEVCMD_TIMEOUT);
- memcpy_fromio(comp, &ionic->idev.dev_cmd_regs->comp.vf_getattr,
- sizeof(*comp));
- mutex_unlock(&ionic->dev_cmd_lock);
-
- if (err && comp->status != IONIC_RC_ENOSUPP)
- ionic_dev_cmd_dev_err_print(ionic, cmd.vf_getattr.opcode,
- comp->status, err);
-
- return err;
-}
-
void ionic_vf_start(struct ionic *ionic)
{
union ionic_dev_cmd cmd = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 9b5463040075..2667e1cde16b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -153,6 +153,7 @@ struct ionic_dev {
bool fw_hb_ready;
bool fw_status_ready;
u8 fw_generation;
+ u8 opcode;
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
@@ -269,12 +270,12 @@ struct ionic_queue {
struct ionic_intr_info {
char name[IONIC_INTR_NAME_MAX_SZ];
+ u64 rearm_count;
unsigned int index;
unsigned int vector;
- u64 rearm_count;
unsigned int cpu;
- cpumask_t affinity_mask;
u32 dim_coal_hw;
+ cpumask_t affinity_mask;
};
struct ionic_cq {
@@ -341,8 +342,7 @@ void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
int ionic_set_vf_config(struct ionic *ionic, int vf,
struct ionic_vf_setattr_cmd *vfc);
-int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
- struct ionic_vf_getattr_comp *comp);
+
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
u16 lif_type, u8 qtype, u8 qver);
void ionic_vf_start(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3a6b0a9bc241..0ffc9c4904ac 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev;
unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
+ idev = &lif->ionic->idev;
+ if (!idev->dev_info_regs)
+ return;
+
offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
- memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
+ memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
}
static void ionic_get_link_ext_stats(struct net_device *netdev,
@@ -823,36 +828,38 @@ static u32 ionic_get_rxfh_key_size(struct net_device *netdev)
return IONIC_RSS_HASH_KEY_SIZE;
}
-static int ionic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ionic_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ionic_lif *lif = netdev_priv(netdev);
unsigned int i, tbl_sz;
- if (indir) {
+ if (rxfh->indir) {
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
for (i = 0; i < tbl_sz; i++)
- indir[i] = lif->rss_ind_tbl[i];
+ rxfh->indir[i] = lif->rss_ind_tbl[i];
}
- if (key)
- memcpy(key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
+ if (rxfh->key)
+ memcpy(rxfh->key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ionic_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- return ionic_lif_rss_config(lif, lif->rss_types, key, indir);
+ return ionic_lif_rss_config(lif, lif->rss_types,
+ rxfh->key, rxfh->indir);
}
static int ionic_set_tunable(struct net_device *dev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index 5f40324cd243..3c209c1a2337 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
dl = priv_to_devlink(ionic);
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ if (!idev->dev_cmd_regs) {
+ err = -ENXIO;
+ goto err_out;
+ }
+
buf_sz = sizeof(idev->dev_cmd_regs->data);
netdev_dbg(netdev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index bad919343180..fcb44ceeb6aa 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -424,14 +424,10 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_qcq_intr_free(lif, qcq);
- if (qcq->cq.info) {
- vfree(qcq->cq.info);
- qcq->cq.info = NULL;
- }
- if (qcq->q.info) {
- vfree(qcq->q.info);
- qcq->q.info = NULL;
- }
+ vfree(qcq->cq.info);
+ qcq->cq.info = NULL;
+ vfree(qcq->q.info);
+ qcq->q.info = NULL;
}
void ionic_qcqs_free(struct ionic_lif *lif)
@@ -2332,82 +2328,11 @@ static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd
}
}
-static int ionic_get_fw_vf_config(struct ionic *ionic, int vf, struct ionic_vf *vfdata)
-{
- struct ionic_vf_getattr_comp comp = { 0 };
- int err;
- u8 attr;
-
- attr = IONIC_VF_ATTR_VLAN;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- vfdata->vlanid = comp.vlanid;
-
- attr = IONIC_VF_ATTR_SPOOFCHK;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- vfdata->spoofchk = comp.spoofchk;
-
- attr = IONIC_VF_ATTR_LINKSTATE;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err) {
- switch (comp.linkstate) {
- case IONIC_VF_LINK_STATUS_UP:
- vfdata->linkstate = IFLA_VF_LINK_STATE_ENABLE;
- break;
- case IONIC_VF_LINK_STATUS_DOWN:
- vfdata->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- break;
- case IONIC_VF_LINK_STATUS_AUTO:
- vfdata->linkstate = IFLA_VF_LINK_STATE_AUTO;
- break;
- default:
- dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
- break;
- }
- }
-
- attr = IONIC_VF_ATTR_RATE;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- vfdata->maxrate = comp.maxrate;
-
- attr = IONIC_VF_ATTR_TRUST;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- vfdata->trusted = comp.trust;
-
- attr = IONIC_VF_ATTR_MAC;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- ether_addr_copy(vfdata->macaddr, comp.macaddr);
-
-err_out:
- if (err)
- dev_err(ionic->dev, "Failed to get %s for VF %d\n",
- ionic_vf_attr_to_str(attr), vf);
-
- return err;
-}
-
static int ionic_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivf)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- struct ionic_vf vfdata = { 0 };
int ret = 0;
if (!netif_device_present(netdev))
@@ -2418,18 +2343,16 @@ static int ionic_get_vf_config(struct net_device *netdev,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ivf->vf = vf;
- ivf->qos = 0;
-
- ret = ionic_get_fw_vf_config(ionic, vf, &vfdata);
- if (!ret) {
- ivf->vlan = le16_to_cpu(vfdata.vlanid);
- ivf->spoofchk = vfdata.spoofchk;
- ivf->linkstate = vfdata.linkstate;
- ivf->max_tx_rate = le32_to_cpu(vfdata.maxrate);
- ivf->trusted = vfdata.trusted;
- ether_addr_copy(ivf->mac, vfdata.macaddr);
- }
+ struct ionic_vf *vfdata = &ionic->vfs[vf];
+
+ ivf->vf = vf;
+ ivf->qos = 0;
+ ivf->vlan = le16_to_cpu(vfdata->vlanid);
+ ivf->spoofchk = vfdata->spoofchk;
+ ivf->linkstate = vfdata->linkstate;
+ ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate);
+ ivf->trusted = vfdata->trusted;
+ ether_addr_copy(ivf->mac, vfdata->macaddr);
}
up_read(&ionic->vf_op_lock);
@@ -3127,6 +3050,7 @@ int ionic_lif_alloc(struct ionic *ionic)
lif = netdev_priv(netdev);
lif->netdev = netdev;
ionic->lif = lif;
+ lif->ionic = ionic;
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
@@ -3149,7 +3073,6 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->neqs = ionic->neqs_per_lif;
lif->nxqs = ionic->ntxqs_per_lif;
- lif->ionic = ionic;
lif->index = 0;
if (is_kdump_kernel()) {
@@ -3238,6 +3161,9 @@ static void ionic_lif_reset(struct ionic_lif *lif)
{
struct ionic_dev *idev = &lif->ionic->idev;
+ if (!ionic_is_fw_running(idev))
+ return;
+
mutex_lock(&lif->ionic->dev_cmd_lock);
ionic_dev_cmd_lif_reset(idev, lif->index);
ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
@@ -3633,7 +3559,10 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
}
- err = ionic_init_nic_features(lif);
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ err = ionic_set_nic_features(lif, lif->netdev->features);
+ else
+ err = ionic_init_nic_features(lif);
if (err)
goto err_out_notifyq_deinit;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 457c24195ca6..61548b3eea93 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -312,6 +312,11 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
return (usecs * mult) / div;
}
+static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
+{
+ return unlikely(q->features & IONIC_TXQ_F_HWSTAMP);
+}
+
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 835577392178..2f479de329fe 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -188,28 +188,6 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
}
}
-const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr)
-{
- switch (attr) {
- case IONIC_VF_ATTR_SPOOFCHK:
- return "IONIC_VF_ATTR_SPOOFCHK";
- case IONIC_VF_ATTR_TRUST:
- return "IONIC_VF_ATTR_TRUST";
- case IONIC_VF_ATTR_LINKSTATE:
- return "IONIC_VF_ATTR_LINKSTATE";
- case IONIC_VF_ATTR_MAC:
- return "IONIC_VF_ATTR_MAC";
- case IONIC_VF_ATTR_VLAN:
- return "IONIC_VF_ATTR_VLAN";
- case IONIC_VF_ATTR_RATE:
- return "IONIC_VF_ATTR_RATE";
- case IONIC_VF_ATTR_STATSADDR:
- return "IONIC_VF_ATTR_STATSADDR";
- default:
- return "IONIC_VF_ATTR_UNKNOWN";
- }
-}
-
static void ionic_adminq_flush(struct ionic_lif *lif)
{
struct ionic_desc_info *desc_info;
@@ -410,28 +388,37 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
do_msg);
}
-int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+static int __ionic_adminq_post_wait(struct ionic_lif *lif,
+ struct ionic_admin_ctx *ctx,
+ const bool do_msg)
{
int err;
+ if (!ionic_is_fw_running(&lif->ionic->idev))
+ return 0;
+
err = ionic_adminq_post(lif, ctx);
- return ionic_adminq_wait(lif, ctx, err, true);
+ return ionic_adminq_wait(lif, ctx, err, do_msg);
}
-int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- int err;
-
- err = ionic_adminq_post(lif, ctx);
+ return __ionic_adminq_post_wait(lif, ctx, true);
+}
- return ionic_adminq_wait(lif, ctx, err, false);
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+{
+ return __ionic_adminq_post_wait(lif, ctx, false);
}
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
+ if (!idev->dev_cmd_regs)
+ return;
+
iowrite32(0, &idev->dev_cmd_regs->doorbell);
memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
@@ -465,7 +452,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
- opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
+ opcode = idev->opcode;
start_time = jiffies;
for (fw_up = ionic_is_fw_running(idev);
!done && fw_up && time_before(jiffies, max_wait);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 9859a4432985..1f6022fb7679 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -258,10 +258,10 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
int i, q_num;
for (i = 0; i < IONIC_NUM_LIF_STATS; i++)
- ethtool_sprintf(buf, ionic_lif_stats_desc[i].name);
+ ethtool_puts(buf, ionic_lif_stats_desc[i].name);
for (i = 0; i < IONIC_NUM_PORT_STATS; i++)
- ethtool_sprintf(buf, ionic_port_stats_desc[i].name);
+ ethtool_puts(buf, ionic_port_stats_desc[i].name);
for (q_num = 0; q_num < MAX_Q(lif); q_num++)
ionic_sw_stats_get_tx_strings(lif, buf, q_num);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index ccc1b1d407e4..6f4776759863 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -579,6 +579,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done = ionic_cq_service(cq, budget,
ionic_tx_service, NULL, NULL);
+ if (unlikely(!budget))
+ return budget;
+
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -607,6 +610,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
+ if (unlikely(!budget))
+ return budget;
+
lif = cq->bound_q->lif;
idev = &lif->ionic->idev;
@@ -656,6 +662,9 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
ionic_tx_service, NULL, NULL);
+ if (unlikely(!budget))
+ return budget;
+
rx_work_done = ionic_cq_service(rxcq, budget,
ionic_rx_service, NULL, NULL);
@@ -803,7 +812,7 @@ static void ionic_tx_clean(struct ionic_queue *q,
qi = skb_get_queue_mapping(skb);
- if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
+ if (ionic_txq_hwstamp_enabled(q)) {
if (cq_info) {
struct skb_shared_hwtstamps hwts = {};
__le64 *cq_desc_hwstamp;
@@ -870,7 +879,7 @@ bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
desc_info->cb_arg = NULL;
} while (index != le16_to_cpu(comp->comp_index));
- if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
return true;
@@ -908,7 +917,7 @@ void ionic_tx_empty(struct ionic_queue *q)
desc_info->cb_arg = NULL;
}
- if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}
@@ -986,7 +995,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
if (start) {
skb_tx_timestamp(skb);
- if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ if (!ionic_txq_hwstamp_enabled(q))
netdev_tx_sent_queue(q_to_ndq(q), skb->len);
ionic_txq_post(q, false, ionic_tx_clean, skb);
} else {
@@ -1233,7 +1242,7 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts++;
stats->bytes += skb->len;
- if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ if (!ionic_txq_hwstamp_enabled(q))
netdev_tx_sent_queue(q_to_ndq(q), skb->len);
ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b6b849a079ed..0e240b5ab8d4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1370,28 +1370,29 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev)
return sizeof(edev->rss_key);
}
-static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+static int qede_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct qede_dev *edev = netdev_priv(dev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
- indir[i] = edev->rss_ind_table[i];
+ rxfh->indir[i] = edev->rss_ind_table[i];
- if (key)
- memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
+ if (rxfh->key)
+ memcpy(rxfh->key, edev->rss_key, qede_get_rxfh_key_size(dev));
return 0;
}
-static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int qede_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct qed_update_vport_params *vport_update_params;
struct qede_dev *edev = netdev_priv(dev);
@@ -1403,20 +1404,21 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
return -EOPNOTSUPP;
}
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir && !key)
+ if (!rxfh->indir && !rxfh->key)
return 0;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
- edev->rss_ind_table[i] = indir[i];
+ edev->rss_ind_table[i] = rxfh->indir[i];
edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
}
- if (key) {
- memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
+ if (rxfh->key) {
+ memcpy(&edev->rss_key, rxfh->key, qede_get_rxfh_key_size(dev));
edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index c95d56e56c59..b733374b4dc5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2092,8 +2092,8 @@ static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter)
return -EINVAL;
}
- strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
- QLC_FW_FILE_NAME_LEN);
+ strscpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
+ sizeof(fw_info->fw_file_name));
ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev);
if (ret) {
@@ -2396,12 +2396,12 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
switch (pdev->device) {
case PCI_DEVICE_ID_QLOGIC_QLE834X:
case PCI_DEVICE_ID_QLOGIC_QLE8830:
- strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
- QLC_FW_FILE_NAME_LEN);
+ strscpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+ sizeof(fw_info->fw_file_name));
break;
case PCI_DEVICE_ID_QLOGIC_QLE844X:
- strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
- QLC_FW_FILE_NAME_LEN);
+ strscpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+ sizeof(fw_info->fw_file_name));
break;
default:
dev_err(&pdev->dev, "%s: Invalid device id\n",
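
The strncpy() to strscpy() switch above matters because strscpy() always NUL-terminates and reports truncation, whereas strncpy() can leave the destination unterminated when the source fills the buffer. A minimal, self-contained sketch (hypothetical buffer size and firmware name, not the qlcnic structures):

	#include <linux/string.h>
	#include <linux/errno.h>

	static void copy_fw_name_example(void)
	{
		char dst[8];
		ssize_t ret;

		ret = strscpy(dst, "a_rather_long_fw_name.bin", sizeof(dst));
		/* ret == -E2BIG here: dst holds "a_rathe\0" -- truncated but
		 * always NUL-terminated.  strncpy() with the same arguments
		 * would have filled dst completely and left it unterminated.
		 */
	}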
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 9adec91f35e9..223321897b96 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -58,9 +58,8 @@ struct qcauart {
unsigned char *tx_buffer;
};
-static int
-qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
- size_t count)
+static ssize_t
+qca_tty_receive(struct serdev_device *serdev, const u8 *data, size_t count)
{
struct qcauart *qca = serdev_device_get_drvdata(serdev);
struct net_device *netdev = qca->net_dev;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 39d24e07f306..5b69b9268c75 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -396,7 +396,7 @@ nla_put_failure:
struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.kind = "rmnet",
- .maxtype = __IFLA_RMNET_MAX,
+ .maxtype = IFLA_RMNET_MAX,
.priv_size = sizeof(struct rmnet_priv),
.setup = rmnet_vnd_setup,
.validate = rmnet_rtnl_validate,
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 93d9df55b361..03015b665f4e 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -113,4 +113,11 @@ config R8169
To compile this driver as a module, choose M here: the module
will be called r8169. This is recommended.
+config R8169_LEDS
+ def_bool R8169 && LEDS_TRIGGER_NETDEV
+ depends on !(R8169=y && LEDS_CLASS=m)
+ help
+ Optional support for controlling the NIC LEDs with the netdev
+ LED trigger.
+
endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 2e1d78b106b0..635491d8826e 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,5 +6,6 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ATP) += atp.o
-r8169-objs += r8169_main.o r8169_firmware.o r8169_phy_config.o
+r8169-y += r8169_main.o r8169_firmware.o r8169_phy_config.o
+r8169-$(CONFIG_R8169_LEDS) += r8169_leds.o
obj-$(CONFIG_R8169) += r8169.o
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 55ef8251feb5..81567fcf3957 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -8,6 +8,7 @@
* See MAINTAINERS file for support contact information.
*/
+#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/phy.h>
@@ -77,3 +78,9 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr);
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver);
+
+void r8169_get_led_name(struct rtl8169_private *tp, int idx,
+ char *buf, int buf_len);
+int rtl8168_get_led_mode(struct rtl8169_private *tp);
+int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
+void rtl8168_init_leds(struct net_device *ndev);
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index cbc6b846ded5..ed6e721b1555 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -151,9 +151,6 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
u32 regno = (action & 0x0fff0000) >> 16;
enum rtl_fw_opcode opcode = action >> 28;
- if (!action)
- break;
-
switch (opcode) {
case PHY_READ:
predata = fw_read(tp, regno);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
new file mode 100644
index 000000000000..007d077edcad
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* r8169_leds.c: Realtek 8169/8168/8101/8125 ethernet driver.
+ *
+ * Copyright (c) 2023 Heiner Kallweit <hkallweit1@gmail.com>
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/leds.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/uleds.h>
+
+#include "r8169.h"
+
+#define RTL8168_LED_CTRL_OPTION2 BIT(15)
+#define RTL8168_LED_CTRL_ACT BIT(3)
+#define RTL8168_LED_CTRL_LINK_1000 BIT(2)
+#define RTL8168_LED_CTRL_LINK_100 BIT(1)
+#define RTL8168_LED_CTRL_LINK_10 BIT(0)
+
+#define RTL8168_NUM_LEDS 3
+
+#define RTL8168_SUPPORTED_MODES \
+ (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK_100) | \
+ BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_RX) | \
+ BIT(TRIGGER_NETDEV_TX))
+
+struct r8169_led_classdev {
+ struct led_classdev led;
+ struct net_device *ndev;
+ int index;
+};
+
+#define lcdev_to_r8169_ldev(lcdev) container_of(lcdev, struct r8169_led_classdev, led)
+
+static int rtl8168_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int shift = ldev->index * 4;
+ bool rx, tx;
+
+ if (flags & ~RTL8168_SUPPORTED_MODES)
+ goto nosupp;
+
+ rx = flags & BIT(TRIGGER_NETDEV_RX);
+ tx = flags & BIT(TRIGGER_NETDEV_TX);
+ if (rx != tx)
+ goto nosupp;
+
+ return 0;
+
+nosupp:
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
+ return -EOPNOTSUPP;
+}
+
+static int rtl8168_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int shift = ldev->index * 4;
+ u16 mode = 0;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode |= RTL8168_LED_CTRL_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode |= RTL8168_LED_CTRL_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode |= RTL8168_LED_CTRL_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_TX))
+ mode |= RTL8168_LED_CTRL_ACT;
+
+ return rtl8168_led_mod_ctrl(tp, 0x000f << shift, mode << shift);
+}
+
+static int rtl8168_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int shift = ldev->index * 4;
+ int mode;
+
+ mode = rtl8168_get_led_mode(tp);
+ if (mode < 0)
+ return mode;
+
+ if (mode & RTL8168_LED_CTRL_OPTION2) {
+ rtl8168_led_mod_ctrl(tp, RTL8168_LED_CTRL_OPTION2, 0);
+ netdev_notice(ldev->ndev, "Deactivating unsupported Option2 LED mode\n");
+ }
+
+ mode = (mode >> shift) & 0x000f;
+
+ if (mode & RTL8168_LED_CTRL_ACT)
+ *flags |= BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+
+ if (mode & RTL8168_LED_CTRL_LINK_10)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_10);
+ if (mode & RTL8168_LED_CTRL_LINK_100)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_100);
+ if (mode & RTL8168_LED_CTRL_LINK_1000)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ return 0;
+}
+
+static struct device *
+ r8169_led_hw_control_get_device(struct led_classdev *led_cdev)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+
+ return &ldev->ndev->dev;
+}
+
+static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
+ struct net_device *ndev, int index)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->ndev = ndev;
+ ldev->index = index;
+
+ r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->default_trigger = "netdev";
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->hw_control_is_supported = rtl8168_led_hw_control_is_supported;
+ led_cdev->hw_control_set = rtl8168_led_hw_control_set;
+ led_cdev->hw_control_get = rtl8168_led_hw_control_get;
+ led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
+
+ /* ignore registration errors; LED support is best-effort */
+ devm_led_classdev_register(&ndev->dev, led_cdev);
+}
+
+void rtl8168_init_leds(struct net_device *ndev)
+{
+ /* bind resource mgmt to netdev */
+ struct device *dev = &ndev->dev;
+ struct r8169_led_classdev *leds;
+ int i;
+
+ leds = devm_kcalloc(dev, RTL8168_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return;
+
+ for (i = 0; i < RTL8168_NUM_LEDS; i++)
+ rtl8168_setup_ldev(leds + i, ndev, i);
+}
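
One constraint in rtl8168_led_hw_control_is_supported() above is easy to miss: the controller exposes a single activity bit (RTL8168_LED_CTRL_ACT), so RX and TX blinking can only be offloaded together, and plain speed-independent link indication is not among the supported modes. Illustrative flag sets only, assuming the netdev trigger mode names:

	/* As evaluated by rtl8168_led_hw_control_is_supported(): */
	unsigned long ok   = BIT(TRIGGER_NETDEV_LINK_1000) |
			     BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX);	/* accepted */
	unsigned long bad1 = BIT(TRIGGER_NETDEV_RX);				/* rejected: RX without TX */
	unsigned long bad2 = BIT(TRIGGER_NETDEV_LINK);				/* rejected: not in RTL8168_SUPPORTED_MODES */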
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 81fd31f6fac4..dd73df6b17b0 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -56,10 +56,6 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
-/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
- The RTL chips use a 64 element hash table based on the Ethernet CRC. */
-#define MC_FILTER_LIMIT 32
-
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -289,6 +285,7 @@ enum rtl8168_8101_registers {
};
enum rtl8168_registers {
+ LED_CTRL = 0x18,
LED_FREQ = 0x1a,
EEE_LED = 0x1b,
ERIDR = 0x70,
@@ -620,6 +617,7 @@ struct rtl8169_private {
raw_spinlock_t config25_lock;
raw_spinlock_t mac_ocp_lock;
+ struct mutex led_lock; /* serialize LED ctrl RMW access */
raw_spinlock_t cfg9346_usage_lock;
int cfg9346_usage_count;
@@ -792,6 +790,62 @@ static const struct rtl_cond name = { \
\
static bool name ## _check(struct rtl8169_private *tp)
+int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val)
+{
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->led_lock);
+ RTL_W16(tp, LED_CTRL, (RTL_R16(tp, LED_CTRL) & ~mask) | val);
+ mutex_unlock(&tp->led_lock);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+int rtl8168_get_led_mode(struct rtl8169_private *tp)
+{
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = RTL_R16(tp, LED_CTRL);
+
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
+void r8169_get_led_name(struct rtl8169_private *tp, int idx,
+ char *buf, int buf_len)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ char pdom[8], pfun[8];
+ int domain;
+
+ domain = pci_domain_nr(pdev->bus);
+ if (domain)
+ snprintf(pdom, sizeof(pdom), "P%d", domain);
+ else
+ pdom[0] = '\0';
+
+ if (pdev->multifunction)
+ snprintf(pfun, sizeof(pfun), "f%d", PCI_FUNC(pdev->devfn));
+ else
+ pfun[0] = '\0';
+
+ snprintf(buf, buf_len, "en%sp%ds%d%s-%d::lan", pdom, pdev->bus->number,
+ PCI_SLOT(pdev->devfn), pfun, idx);
+}
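
To make the naming scheme in r8169_get_led_name() concrete, here is the output for a hypothetical single-function NIC; the exact PCI location is illustrative only:

	/* Hypothetical NIC at PCI 0000:03:00.0 (domain 0, bus 3, slot 0):
	 *   index 0 -> "enp3s0-0::lan"
	 *   index 1 -> "enp3s0-1::lan"
	 *   index 2 -> "enp3s0-2::lan"
	 * A non-zero domain adds a "P<domain>" prefix; a multi-function
	 * device adds an "f<function>" suffix before the "-<index>" part.
	 */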
+
static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
@@ -2233,6 +2287,9 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
+ if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
+ return;
+
set_bit(flag, tp->wk.flags);
schedule_work(&tp->wk.work);
}
@@ -2603,8 +2660,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode |= AcceptAllPhys;
} else if (!(dev->flags & IFF_MULTICAST)) {
rx_mode &= ~AcceptMulticast;
- } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
- dev->flags & IFF_ALLMULTI ||
+ } else if (dev->flags & IFF_ALLMULTI ||
tp->mac_version == RTL_GIGA_MAC_VER_35) {
/* accept all multicasts */
} else if (netdev_mc_empty(dev)) {
@@ -3106,6 +3162,33 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168g_2);
}
+static void rtl8411b_fix_phy_down(struct rtl8169_private *tp)
+{
+ static const u16 fix_data[] = {
+/* 0xf800 */ 0xe008, 0xe00a, 0xe00c, 0xe00e, 0xe027, 0xe04f, 0xe05e, 0xe065,
+/* 0xf810 */ 0xc602, 0xbe00, 0x0000, 0xc502, 0xbd00, 0x074c, 0xc302, 0xbb00,
+/* 0xf820 */ 0x080a, 0x6420, 0x48c2, 0x8c20, 0xc516, 0x64a4, 0x49c0, 0xf009,
+/* 0xf830 */ 0x74a2, 0x8ca5, 0x74a0, 0xc50e, 0x9ca2, 0x1c11, 0x9ca0, 0xe006,
+/* 0xf840 */ 0x74f8, 0x48c4, 0x8cf8, 0xc404, 0xbc00, 0xc403, 0xbc00, 0x0bf2,
+/* 0xf850 */ 0x0c0a, 0xe434, 0xd3c0, 0x49d9, 0xf01f, 0xc526, 0x64a5, 0x1400,
+/* 0xf860 */ 0xf007, 0x0c01, 0x8ca5, 0x1c15, 0xc51b, 0x9ca0, 0xe013, 0xc519,
+/* 0xf870 */ 0x74a0, 0x48c4, 0x8ca0, 0xc516, 0x74a4, 0x48c8, 0x48ca, 0x9ca4,
+/* 0xf880 */ 0xc512, 0x1b00, 0x9ba0, 0x1b1c, 0x483f, 0x9ba2, 0x1b04, 0xc508,
+/* 0xf890 */ 0x9ba0, 0xc505, 0xbd00, 0xc502, 0xbd00, 0x0300, 0x051e, 0xe434,
+/* 0xf8a0 */ 0xe018, 0xe092, 0xde20, 0xd3c0, 0xc50f, 0x76a4, 0x49e3, 0xf007,
+/* 0xf8b0 */ 0x49c0, 0xf103, 0xc607, 0xbe00, 0xc606, 0xbe00, 0xc602, 0xbe00,
+/* 0xf8c0 */ 0x0c4c, 0x0c28, 0x0c2c, 0xdc00, 0xc707, 0x1d00, 0x8de2, 0x48c1,
+/* 0xf8d0 */ 0xc502, 0xbd00, 0x00aa, 0xe0c0, 0xc502, 0xbd00, 0x0132
+ };
+ unsigned long flags;
+ int i;
+
+ raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(fix_data); i++)
+ __r8168_mac_ocp_write(tp, 0xf800 + 2 * i, fix_data[i]);
+ raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+}
+
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8411_2[] = {
@@ -3139,117 +3222,7 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
mdelay(3);
r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
- r8168_mac_ocp_write(tp, 0xF800, 0xE008);
- r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
- r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
- r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
- r8168_mac_ocp_write(tp, 0xF808, 0xE027);
- r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
- r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
- r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
- r8168_mac_ocp_write(tp, 0xF810, 0xC602);
- r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
- r8168_mac_ocp_write(tp, 0xF814, 0x0000);
- r8168_mac_ocp_write(tp, 0xF816, 0xC502);
- r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
- r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
- r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
- r8168_mac_ocp_write(tp, 0xF820, 0x080A);
- r8168_mac_ocp_write(tp, 0xF822, 0x6420);
- r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
- r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
- r8168_mac_ocp_write(tp, 0xF828, 0xC516);
- r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
- r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
- r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
- r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
- r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
- r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
- r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
- r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
- r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
- r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
- r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
- r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
- r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
- r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
- r8168_mac_ocp_write(tp, 0xF846, 0xC404);
- r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
- r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
- r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
- r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
- r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
- r8168_mac_ocp_write(tp, 0xF852, 0xE434);
- r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
- r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
- r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
- r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
- r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
- r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
- r8168_mac_ocp_write(tp, 0xF860, 0xF007);
- r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
- r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
- r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
- r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
- r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
- r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
- r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
- r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
- r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
- r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
- r8168_mac_ocp_write(tp, 0xF876, 0xC516);
- r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
- r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
- r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
- r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
- r8168_mac_ocp_write(tp, 0xF880, 0xC512);
- r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
- r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
- r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
- r8168_mac_ocp_write(tp, 0xF888, 0x483F);
- r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
- r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
- r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
- r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
- r8168_mac_ocp_write(tp, 0xF892, 0xC505);
- r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF896, 0xC502);
- r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
- r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
- r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
- r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
- r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
- r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
- r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
- r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
- r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
- r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
- r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
- r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
- r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
- r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
- r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
- r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
- r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
- r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
- r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
- r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
- r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
- r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
- r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
- r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
- r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
- r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
- r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
- r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
- r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
- r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
- r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
- r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
+ rtl8411b_fix_phy_down(tp);
r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
@@ -4561,8 +4534,7 @@ static void rtl_task(struct work_struct *work)
rtnl_lock();
- if (!netif_running(tp->dev) ||
- !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
+ if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
goto out_unlock;
if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
@@ -5227,6 +5199,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
raw_spin_lock_init(&tp->cfg9346_usage_lock);
raw_spin_lock_init(&tp->config25_lock);
raw_spin_lock_init(&tp->mac_ocp_lock);
+ mutex_init(&tp->led_lock);
dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
struct pcpu_sw_netstats);
@@ -5383,6 +5356,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+ if (IS_ENABLED(CONFIG_R8169_LEDS) &&
+ tp->mac_version > RTL_GIGA_MAC_VER_06 &&
+ tp->mac_version < RTL_GIGA_MAC_VER_61)
+ rtl8168_init_leds(dev);
+
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 8ef5b0241e64..d6136fe5c206 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -40,11 +40,21 @@ config RAVB
config RENESAS_ETHER_SWITCH
tristate "Renesas Ethernet Switch support"
depends on ARCH_RENESAS || COMPILE_TEST
- depends on PTP_1588_CLOCK_OPTIONAL
+ depends on PTP_1588_CLOCK
select CRC32
select MII
select PHYLINK
+ select RENESAS_GEN4_PTP
help
Renesas Ethernet Switch device driver.
+config RENESAS_GEN4_PTP
+ tristate "Renesas R-Car Gen4 gPTP support" if COMPILE_TEST
+ depends on PTP_1588_CLOCK
+ select CRC32
+ select MII
+ select PHYLIB
+ help
+ Renesas R-Car Gen4 gPTP device driver.
+
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index e8fd85b5fe8f..9070acfd6aaf 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
obj-$(CONFIG_RAVB) += ravb.o
-rswitch_drv-objs := rswitch.o rcar_gen4_ptp.o
-obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch_drv.o
+obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
+
+obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8649b3e90edb..f7566cfa45ca 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -772,29 +772,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
struct ravb_rx_desc *desc;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ int rx_packets = 0;
u8 desc_status;
- int boguscnt;
u16 pkt_len;
u8 die_dt;
int entry;
int limit;
+ int i;
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
desc = &priv->gbeth_rx_ring[entry];
- while (desc->die_dt != DT_FEMPTY) {
+ for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -820,7 +816,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
case DT_FSTART:
@@ -848,7 +844,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
eth_type_trans(priv->rx_1st_skb, ndev);
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
}
@@ -887,9 +883,9 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
@@ -1949,7 +1945,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
unsigned long flags;
- u32 dma_addr;
+ dma_addr_t dma_addr;
void *buffer;
u32 entry;
u32 len;
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index c007e33c47e1..72e7fcc56693 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -14,7 +14,7 @@
#include "rcar_gen4_ptp.h"
#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
-static const struct rcar_gen4_ptp_reg_offset s4_offs = {
+static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
.enable = PTPTMEC,
.disable = PTPTMDC,
.increment = PTPTIVC0,
@@ -130,25 +130,42 @@ static struct ptp_clock_info rcar_gen4_ptp_info = {
.enable = rcar_gen4_ptp_enable,
};
-static void rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout)
+static int rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
+ enum rcar_gen4_ptp_reg_layout layout)
{
- WARN_ON(layout != RCAR_GEN4_PTP_REG_LAYOUT_S4);
+ if (layout != RCAR_GEN4_PTP_REG_LAYOUT)
+ return -EINVAL;
- ptp_priv->offs = &s4_offs;
+ ptp_priv->offs = &gen4_offs;
+
+ return 0;
+}
+
+static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
+{
+ /* Timer increment in ns.
+ * bit[31:27] - integer
+ * bit[26:0] - decimal
+ * increment[ns] = period[ns] * 2^27 => (1ns * 2^27) / rate[Hz]
+ */
+ return div_s64(1000000000LL << 27, rate);
}
int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 clock)
+ enum rcar_gen4_ptp_reg_layout layout, u32 rate)
{
+ int ret;
+
if (ptp_priv->initialized)
return 0;
spin_lock_init(&ptp_priv->lock);
- rcar_gen4_ptp_set_offs(ptp_priv, layout);
+ ret = rcar_gen4_ptp_set_offs(ptp_priv, layout);
+ if (ret)
+ return ret;
- ptp_priv->default_addend = clock;
+ ptp_priv->default_addend = rcar_gen4_ptp_rate_to_increment(rate);
iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment);
ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL);
if (IS_ERR(ptp_priv->clock))
@@ -159,6 +176,7 @@ int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
return 0;
}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_register);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
{
@@ -166,6 +184,7 @@ int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
return ptp_clock_unregister(ptp_priv->clock);
}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_unregister);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
{
@@ -179,3 +198,8 @@ struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
return ptp;
}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_alloc);
+
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas R-Car Gen4 gPTP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index b1bbea8d3a52..e22da5acd53d 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -9,13 +9,10 @@
#include <linux/ptp_clock_kernel.h>
-#define PTPTIVC_INIT 0x19000000 /* 320MHz */
-#define RCAR_GEN4_PTP_CLOCK_S4 PTPTIVC_INIT
#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000
-/* for rcar_gen4_ptp_init */
enum rcar_gen4_ptp_reg_layout {
- RCAR_GEN4_PTP_REG_LAYOUT_S4
+ RCAR_GEN4_PTP_REG_LAYOUT
};
/* driver's definitions */
@@ -28,7 +25,7 @@ enum rcar_gen4_ptp_reg_layout {
#define PTPRO 0
-enum rcar_gen4_ptp_reg_s4 {
+enum rcar_gen4_ptp_reg {
PTPTMEC = PTPRO + 0x0010,
PTPTMDC = PTPRO + 0x0014,
PTPTIVC0 = PTPRO + 0x0020,
@@ -65,7 +62,7 @@ struct rcar_gen4_ptp_private {
};
int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 clock);
+ enum rcar_gen4_ptp_reg_layout layout, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index e77c6ff93d81..dcab638c57fe 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -56,7 +56,8 @@ static void rswitch_clock_disable(struct rswitch_private *priv)
iowrite32(RCDC_RCD, priv->addr + RCDC);
}
-static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
+static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
+ unsigned int port)
{
u32 val = ioread32(coma_addr + RCEC);
@@ -66,7 +67,8 @@ static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
return false;
}
-static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
+static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
+ int enable)
{
u32 val;
@@ -100,7 +102,7 @@ static void rswitch_coma_init(struct rswitch_private *priv)
/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
- int i;
+ unsigned int i;
for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
@@ -109,7 +111,7 @@ static void rswitch_top_init(struct rswitch_private *priv)
/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
- int i;
+ unsigned int i;
/* For ETHA */
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
@@ -166,7 +168,7 @@ static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
- int i;
+ unsigned int i;
for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
if (dis[i] & mask[i])
@@ -178,7 +180,7 @@ static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool
static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
- int i;
+ unsigned int i;
for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
dis[i] = ioread32(priv->addr + GWDIS(i));
@@ -186,23 +188,26 @@ static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
}
}
-static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
+static void rswitch_enadis_data_irq(struct rswitch_private *priv,
+ unsigned int index, bool enable)
{
u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);
iowrite32(BIT(index % 32), priv->addr + offs);
}
-static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
+static void rswitch_ack_data_irq(struct rswitch_private *priv,
+ unsigned int index)
{
u32 offs = GWDIS(index / 32);
iowrite32(BIT(index % 32), priv->addr + offs);
}
-static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
+static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
+ bool cur, unsigned int num)
{
- int index = cur ? gq->cur : gq->dirty;
+ unsigned int index = cur ? gq->cur : gq->dirty;
if (index + num >= gq->ring_size)
index = (index + num) % gq->ring_size;
@@ -212,7 +217,7 @@ static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int
return index;
}
-static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
+static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
if (gq->cur >= gq->dirty)
return gq->cur - gq->dirty;
@@ -230,28 +235,28 @@ static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
return false;
}
-static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
- int start_index, int num)
+static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
+ unsigned int start_index,
+ unsigned int num)
{
- int i, index;
+ unsigned int i, index;
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
- if (gq->skbs[index])
+ if (gq->rx_bufs[index])
continue;
- gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
- PKT_BUF_SZ + RSWITCH_ALIGN - 1);
- if (!gq->skbs[index])
+ gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
+ if (!gq->rx_bufs[index])
goto err;
}
return 0;
err:
- for (i--; i >= 0; i--) {
+ for (; i-- > 0; ) {
index = (i + start_index) % gq->ring_size;
- dev_kfree_skb(gq->skbs[index]);
- gq->skbs[index] = NULL;
+ skb_free_frag(gq->rx_bufs[index]);
+ gq->rx_bufs[index] = NULL;
}
return -ENOMEM;
@@ -260,7 +265,7 @@ err:
static void rswitch_gwca_queue_free(struct net_device *ndev,
struct rswitch_gwca_queue *gq)
{
- int i;
+ unsigned int i;
if (!gq->dir_tx) {
dma_free_coherent(ndev->dev.parent,
@@ -269,16 +274,19 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
gq->rx_ring = NULL;
for (i = 0; i < gq->ring_size; i++)
- dev_kfree_skb(gq->skbs[i]);
+ skb_free_frag(gq->rx_bufs[i]);
+ kfree(gq->rx_bufs);
+ gq->rx_bufs = NULL;
} else {
dma_free_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_desc) *
(gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
gq->tx_ring = NULL;
+ kfree(gq->skbs);
+ gq->skbs = NULL;
+ kfree(gq->unmap_addrs);
+ gq->unmap_addrs = NULL;
}
-
- kfree(gq->skbs);
- gq->skbs = NULL;
}
static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
@@ -294,25 +302,31 @@ static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq,
- bool dir_tx, int ring_size)
+ bool dir_tx, unsigned int ring_size)
{
- int i, bit;
+ unsigned int i, bit;
gq->dir_tx = dir_tx;
gq->ring_size = ring_size;
gq->ndev = ndev;
- gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
- if (!gq->skbs)
- return -ENOMEM;
-
if (!dir_tx) {
- rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
+ gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
+ if (!gq->rx_bufs)
+ return -ENOMEM;
+ if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
+ goto out;
gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_ts_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
} else {
+ gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
+ if (!gq->skbs)
+ return -ENOMEM;
+ gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
+ if (!gq->unmap_addrs)
+ goto out;
gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
@@ -351,22 +365,23 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq)
{
- int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+ unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
struct rswitch_ext_desc *desc;
struct rswitch_desc *linkfix;
dma_addr_t dma_addr;
- int i;
+ unsigned int i;
memset(gq->tx_ring, 0, ring_size);
for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
- gq->skbs[i]->data, PKT_BUF_SZ,
+ gq->rx_bufs[i] + RSWITCH_HEADROOM,
+ RSWITCH_MAP_BUF_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto err;
- desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+ desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
rswitch_desc_set_dptr(&desc->desc, dma_addr);
desc->desc.die_dt = DT_FEMPTY | DIE;
} else {
@@ -387,10 +402,10 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
err:
if (!gq->dir_tx) {
- for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
+ for (desc = gq->tx_ring; i-- > 0; desc++) {
dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
- DMA_FROM_DEVICE);
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
}
}
@@ -398,11 +413,12 @@ err:
}
static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
- int start_index, int num)
+ unsigned int start_index,
+ unsigned int num)
{
struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
struct rswitch_ts_desc *desc;
- int i, index;
+ unsigned int i, index;
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
@@ -413,24 +429,26 @@ static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
struct rswitch_gwca_queue *gq,
- int start_index, int num)
+ unsigned int start_index,
+ unsigned int num)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_ext_ts_desc *desc;
+ unsigned int i, index;
dma_addr_t dma_addr;
- int i, index;
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
desc = &gq->rx_ring[index];
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
- gq->skbs[index]->data, PKT_BUF_SZ,
+ gq->rx_bufs[index] + RSWITCH_HEADROOM,
+ RSWITCH_MAP_BUF_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto err;
- desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+ desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
rswitch_desc_set_dptr(&desc->desc, dma_addr);
dma_wmb();
desc->desc.die_dt = DT_FEMPTY | DIE;
@@ -444,12 +462,12 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
err:
if (!gq->dir_tx) {
- for (i--; i >= 0; i--) {
+ for (; i-- > 0; ) {
index = (i + start_index) % gq->ring_size;
desc = &gq->rx_ring[index];
dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
- DMA_FROM_DEVICE);
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
}
}
@@ -460,7 +478,7 @@ static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq)
{
- int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+ unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
struct rswitch_ext_ts_desc *desc;
struct rswitch_desc *linkfix;
int err;
@@ -487,7 +505,7 @@ static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
- int i, num_queues = priv->gwca.num_queues;
+ unsigned int i, num_queues = priv->gwca.num_queues;
struct rswitch_gwca *gwca = &priv->gwca;
struct device *dev = &priv->pdev->dev;
@@ -537,7 +555,7 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq;
- int index;
+ unsigned int index;
index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
if (index >= priv->gwca.num_queues)
@@ -583,7 +601,7 @@ static void rswitch_txdmac_free(struct net_device *ndev)
rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}
-static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
+static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
{
struct rswitch_device *rdev = priv->rdev[index];
@@ -617,7 +635,7 @@ static void rswitch_rxdmac_free(struct net_device *ndev)
rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}
-static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
+static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
{
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
@@ -627,7 +645,8 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
- int i, err;
+ unsigned int i;
+ int err;
err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
if (err < 0)
@@ -649,6 +668,8 @@ static int rswitch_gwca_hw_init(struct rswitch_private *priv)
iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
+ iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f),
+ priv->addr + GWMDNC);
iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);
@@ -693,15 +714,88 @@ static int rswitch_gwca_halt(struct rswitch_private *priv)
return err;
}
+static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev,
+ struct rswitch_gwca_queue *gq,
+ struct rswitch_ext_ts_desc *desc)
+{
+ dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
+ u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
+ u8 die_dt = desc->desc.die_dt & DT_MASK;
+ struct sk_buff *skb = NULL;
+
+ dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* The RX descriptor order will be one of the following:
+ * - FSINGLE
+ * - FSTART -> FEND
+ * - FSTART -> FMID -> FEND
+ */
+
+ /* Check whether the descriptor arrived in an unexpected order */
+ switch (die_dt) {
+ case DT_FSTART:
+ case DT_FSINGLE:
+ if (gq->skb_fstart) {
+ dev_kfree_skb_any(gq->skb_fstart);
+ gq->skb_fstart = NULL;
+ ndev->stats.rx_dropped++;
+ }
+ break;
+ case DT_FMID:
+ case DT_FEND:
+ if (!gq->skb_fstart) {
+ ndev->stats.rx_dropped++;
+ return NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Handle the descriptor */
+ switch (die_dt) {
+ case DT_FSTART:
+ case DT_FSINGLE:
+ skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
+ if (skb) {
+ skb_reserve(skb, RSWITCH_HEADROOM);
+ skb_put(skb, pkt_len);
+ gq->pkt_len = pkt_len;
+ if (die_dt == DT_FSTART) {
+ gq->skb_fstart = skb;
+ skb = NULL;
+ }
+ }
+ break;
+ case DT_FMID:
+ case DT_FEND:
+ skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
+ virt_to_page(gq->rx_bufs[gq->cur]),
+ offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
+ pkt_len, RSWITCH_BUF_SIZE);
+ if (die_dt == DT_FEND) {
+ skb = gq->skb_fstart;
+ gq->skb_fstart = NULL;
+ }
+ gq->pkt_len += pkt_len;
+ break;
+ default:
+ netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
+ break;
+ }
+
+ return skb;
+}
+
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_queue *gq = rdev->rx_queue;
struct rswitch_ext_ts_desc *desc;
- int limit, boguscnt, num, ret;
+ int limit, boguscnt, ret;
struct sk_buff *skb;
- dma_addr_t dma_addr;
- u16 pkt_len;
+ unsigned int num;
u32 get_ts;
if (*quota <= 0)
@@ -713,11 +807,10 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
desc = &gq->rx_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
dma_rmb();
- pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
- skb = gq->skbs[gq->cur];
- gq->skbs[gq->cur] = NULL;
- dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ skb = rswitch_rx_handle_desc(ndev, gq, desc);
+ if (!skb)
+ goto out;
+
get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
if (get_ts) {
struct skb_shared_hwtstamps *shhwtstamps;
@@ -729,12 +822,13 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
}
- skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&rdev->napi, skb);
rdev->ndev->stats.rx_packets++;
- rdev->ndev->stats.rx_bytes += pkt_len;
+ rdev->ndev->stats.rx_bytes += gq->pkt_len;
+out:
+ gq->rx_bufs[gq->cur] = NULL;
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->rx_ring[gq->cur];
@@ -743,7 +837,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
}
num = rswitch_get_num_cur_queues(gq);
- ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
+ ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
if (ret < 0)
goto err;
ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
@@ -761,39 +855,32 @@ err:
return 0;
}
-static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
+static void rswitch_tx_free(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_queue *gq = rdev->tx_queue;
struct rswitch_ext_desc *desc;
- dma_addr_t dma_addr;
struct sk_buff *skb;
- int free_num = 0;
- int size;
for (; rswitch_get_num_cur_queues(gq) > 0;
gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
desc = &gq->tx_ring[gq->dirty];
- if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
+ if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
break;
dma_rmb();
- size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
skb = gq->skbs[gq->dirty];
if (skb) {
- dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr,
- size, DMA_TO_DEVICE);
+ dma_unmap_single(ndev->dev.parent,
+ gq->unmap_addrs[gq->dirty],
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL;
- free_num++;
+ rdev->ndev->stats.tx_packets++;
+ rdev->ndev->stats.tx_bytes += skb->len;
}
desc->desc.die_dt = DT_EEMPTY;
- rdev->ndev->stats.tx_packets++;
- rdev->ndev->stats.tx_bytes += size;
}
-
- return free_num;
}
static int rswitch_poll(struct napi_struct *napi, int budget)
@@ -808,7 +895,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
priv = rdev->priv;
retry:
- rswitch_tx_free(ndev, true);
+ rswitch_tx_free(ndev);
if (rswitch_rx(ndev, &quota))
goto out;
@@ -851,7 +938,7 @@ static void rswitch_queue_interrupt(struct net_device *ndev)
static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
struct rswitch_gwca_queue *gq;
- int i, index, bit;
+ unsigned int i, index, bit;
for (i = 0; i < priv->gwca.num_queues; i++) {
gq = &priv->gwca.queues[i];
@@ -918,8 +1005,8 @@ static void rswitch_ts(struct rswitch_private *priv)
struct skb_shared_hwtstamps shhwtstamps;
struct rswitch_ts_desc *desc;
struct timespec64 ts;
+ unsigned int num;
u32 tag, port;
- int num;
desc = &gq->ts_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
@@ -1438,7 +1525,7 @@ err_init_one:
static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
- int i;
+ unsigned int i;
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
phy_exit(priv->rdev[i]->serdes);
@@ -1500,31 +1587,10 @@ static int rswitch_stop(struct net_device *ndev)
return 0;
};
-static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
+ struct sk_buff *skb,
+ struct rswitch_ext_desc *desc)
{
- struct rswitch_device *rdev = netdev_priv(ndev);
- struct rswitch_gwca_queue *gq = rdev->tx_queue;
- netdev_tx_t ret = NETDEV_TX_OK;
- struct rswitch_ext_desc *desc;
- dma_addr_t dma_addr;
-
- if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
- netif_stop_subqueue(ndev, 0);
- return NETDEV_TX_BUSY;
- }
-
- if (skb_put_padto(skb, ETH_ZLEN))
- return ret;
-
- dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- goto err_kfree;
-
- gq->skbs[gq->cur] = skb;
- desc = &gq->tx_ring[gq->cur];
- rswitch_desc_set_dptr(&desc->desc, dma_addr);
- desc->desc.info_ds = cpu_to_le16(skb->len);
-
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
@@ -1532,7 +1598,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
if (!ts_info)
- goto err_unmap;
+ return false;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
rdev->ts_tag++;
@@ -1546,18 +1612,97 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
skb_tx_timestamp(skb);
}
+ return true;
+}
+
+static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
+ struct sk_buff *skb,
+ struct rswitch_ext_desc *desc,
+ dma_addr_t dma_addr, u16 len, u8 die_dt)
+{
+ rswitch_desc_set_dptr(&desc->desc, dma_addr);
+ desc->desc.info_ds = cpu_to_le16(len);
+ if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
+ return false;
+
dma_wmb();
- desc->desc.die_dt = DT_FSINGLE | DIE;
+ desc->desc.die_dt = die_dt;
+
+ return true;
+}
+
+static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
+{
+ if (nr_desc == 1)
+ return DT_FSINGLE | DIE;
+ if (index == 0)
+ return DT_FSTART;
+ if (nr_desc - 1 == index)
+ return DT_FEND | DIE;
+ return DT_FMID;
+}
+
+static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
+{
+ switch (die_dt & DT_MASK) {
+ case DT_FSINGLE:
+ case DT_FEND:
+ return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
+ case DT_FSTART:
+ case DT_FMID:
+ return RSWITCH_DESC_BUF_SIZE;
+ default:
+ return 0;
+ }
+}
+
+static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_queue *gq = rdev->tx_queue;
+ dma_addr_t dma_addr, dma_addr_orig;
+ netdev_tx_t ret = NETDEV_TX_OK;
+ struct rswitch_ext_desc *desc;
+ unsigned int i, nr_desc;
+ u8 die_dt;
+ u16 len;
+
+ nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
+ if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
+ netif_stop_subqueue(ndev, 0);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return ret;
+
+ dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
+ goto err_kfree;
+
+ gq->skbs[gq->cur] = skb;
+ gq->unmap_addrs[gq->cur] = dma_addr_orig;
+
+ /* DT_FSTART should be set last, so the descriptors are written in reverse order. */
+ for (i = nr_desc; i-- > 0; ) {
+ desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
+ die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
+ dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
+ len = rswitch_ext_desc_get_len(die_dt, skb->len);
+ if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
+ goto err_unmap;
+ }
+
wmb(); /* gq->cur must be incremented after die_dt was set */
- gq->cur = rswitch_next_queue_index(gq, true, 1);
+ gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
err_unmap:
- dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+ dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
err_kfree:
dev_kfree_skb_any(skb);
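
To illustrate the multi-descriptor TX path introduced above: a hypothetical 5000-byte skb, with RSWITCH_DESC_BUF_SIZE of 2048, is split into three descriptors, written in reverse so that DT_FSTART becomes visible to the hardware last:

	/* Hypothetical 5000-byte frame, RSWITCH_DESC_BUF_SIZE = 2048:
	 *   nr_desc = (5000 - 1) / 2048 + 1 = 3
	 *
	 *   i = 2: die_dt = DT_FEND | DIE, len = 5000 % 2048 = 904   (written first)
	 *   i = 1: die_dt = DT_FMID,       len = 2048
	 *   i = 0: die_dt = DT_FSTART,     len = 2048                (written last)
	 */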
@@ -1693,7 +1838,7 @@ static const struct of_device_id renesas_eth_sw_of_table[] = {
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);
-static void rswitch_etha_init(struct rswitch_private *priv, int index)
+static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
struct rswitch_etha *etha = &priv->etha[index];
@@ -1709,7 +1854,7 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index)
etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}
-static int rswitch_device_alloc(struct rswitch_private *priv, int index)
+static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
{
struct platform_device *pdev = priv->pdev;
struct rswitch_device *rdev;
@@ -1738,6 +1883,8 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
ndev->netdev_ops = &rswitch_netdev_ops;
ndev->ethtool_ops = &rswitch_ethtool_ops;
+ ndev->max_mtu = RSWITCH_MAX_MTU;
+ ndev->min_mtu = ETH_MIN_MTU;
netif_napi_add(ndev, &rdev->napi, rswitch_poll);
@@ -1780,7 +1927,7 @@ out_get_params:
return err;
}
-static void rswitch_device_free(struct rswitch_private *priv, int index)
+static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
{
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
@@ -1832,8 +1979,8 @@ static int rswitch_init(struct rswitch_private *priv)
rswitch_fwd_init(priv);
- err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
- RCAR_GEN4_PTP_CLOCK_S4);
+ err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
+ clk_get_rate(priv->clk));
if (err < 0)
goto err_ptp_register;
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 27c9d3872c0e..72e3ff596d31 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -26,11 +26,17 @@
else
#define TX_RING_SIZE 1024
-#define RX_RING_SIZE 1024
+#define RX_RING_SIZE 4096
#define TS_RING_SIZE (TX_RING_SIZE * RSWITCH_NUM_PORTS)
-#define PKT_BUF_SZ 1584
+#define RSWITCH_MAX_MTU 9600
+#define RSWITCH_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+#define RSWITCH_DESC_BUF_SIZE 2048
+#define RSWITCH_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
#define RSWITCH_ALIGN 128
+#define RSWITCH_BUF_SIZE (RSWITCH_HEADROOM + RSWITCH_DESC_BUF_SIZE + \
+ RSWITCH_TAILROOM + RSWITCH_ALIGN)
+#define RSWITCH_MAP_BUF_SIZE (RSWITCH_BUF_SIZE - RSWITCH_HEADROOM)
#define RSWITCH_MAX_CTAG_PCP 7
#define RSWITCH_TIMEOUT_US 100000
@@ -768,6 +774,10 @@ enum rswitch_gwca_mode {
#define GWARIRM_ARIOG BIT(0)
#define GWARIRM_ARR BIT(1)
+#define GWMDNC_TSDMN(num) (((num) << 16) & GENMASK(17, 16))
+#define GWMDNC_TXDMN(num) (((num) << 8) & GENMASK(12, 8))
+#define GWMDNC_RXDMN(num) ((num) & GENMASK(4, 0))
+
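+ /* Example: the GWMDNC value programmed in rswitch_gwca_hw_init() earlier
+  * in this patch expands, per the macros above, to:
+  *   GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f)
+  *   = (1 << 16) | (0x1e << 8) | 0x1f = 0x00011e1f
+  */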
#define GWDCC_BALR BIT(24)
#define GWDCC_DCP_MASK GENMASK(18, 16)
#define GWDCC_DCP(prio) FIELD_PREP(GWDCC_DCP_MASK, (prio))
@@ -909,7 +919,7 @@ struct rswitch_ext_ts_desc {
} __packed;
struct rswitch_etha {
- int index;
+ unsigned int index;
void __iomem *addr;
void __iomem *coma_addr;
bool external_phy;
@@ -938,15 +948,28 @@ struct rswitch_gwca_queue {
/* Common */
dma_addr_t ring_dma;
- int ring_size;
- int cur;
- int dirty;
+ unsigned int ring_size;
+ unsigned int cur;
+ unsigned int dirty;
- /* For [rt]_ring */
- int index;
+ /* For [rt]x_ring */
+ unsigned int index;
bool dir_tx;
- struct sk_buff **skbs;
struct net_device *ndev; /* queue to ndev for irq */
+
+ union {
+ /* For TX */
+ struct {
+ struct sk_buff **skbs;
+ dma_addr_t *unmap_addrs;
+ };
+ /* For RX */
+ struct {
+ void **rx_bufs;
+ struct sk_buff *skb_fstart;
+ u16 pkt_len;
+ };
+ };
};
struct rswitch_gwca_ts_info {
@@ -959,7 +982,7 @@ struct rswitch_gwca_ts_info {
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
- int index;
+ unsigned int index;
struct rswitch_desc *linkfix_table;
dma_addr_t linkfix_table_dma;
u32 linkfix_table_size;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 6dfa062feebc..8fa6c0e9195b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3706,13 +3706,13 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
}
static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
- struct hwtstamp_config *init)
+ struct kernel_hwtstamp_config *init)
{
return -EOPNOTSUPP;
}
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
- struct hwtstamp_config *init)
+ struct kernel_hwtstamp_config *init)
{
int rc;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 702abbe59b76..cf55202b3a7b 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -37,6 +37,7 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
@@ -60,8 +61,6 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
- .get_rxfh_context = efx_ethtool_get_rxfh_context,
- .set_rxfh_context = efx_ethtool_set_rxfh_context,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 19f4b4d0b851..e9d9de8e648a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -495,11 +495,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct efx_nic *efx = efx_netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
- if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_set_ts_config(efx, ifr);
- if (cmd == SIOCGHWTSTAMP)
- return efx_ptp_get_ts_config(efx, ifr);
-
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400)
@@ -581,6 +576,23 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
return -EOPNOTSUPP;
}
+static int efx_hwtstamp_set(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return efx_ptp_set_ts_config(efx, config, extack);
+}
+
+static int efx_hwtstamp_get(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return efx_ptp_get_ts_config(efx, config);
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -596,6 +608,8 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_features_check = efx_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = efx_hwtstamp_set,
+ .ndo_hwtstamp_get = efx_hwtstamp_get,
#ifdef CONFIG_SFC_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 364323599f7b..37c69c8d90b1 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -240,6 +240,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -269,8 +270,6 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
- .get_rxfh_context = efx_ethtool_get_rxfh_context,
- .set_rxfh_context = efx_ethtool_set_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index a8cbceeb301b..7d5e5db4eac5 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -1163,48 +1163,8 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- int rc;
-
- rc = efx->type->rx_pull_rss_config(efx);
- if (rc)
- return rc;
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, efx->rss_context.rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- if (key)
- memcpy(key, efx->rss_context.rx_hash_key,
- efx->type->rx_hash_key_size);
- return 0;
-}
-
-int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
-
- /* Hash function is Toeplitz, cannot be changed */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
- return -EOPNOTSUPP;
- if (!indir && !key)
- return 0;
-
- if (!key)
- key = efx->rss_context.rx_hash_key;
- if (!indir)
- indir = efx->rss_context.rx_indir_table;
-
- return efx->type->rx_push_rss_config(efx, true, indir, key);
-}
-
-int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static int efx_ethtool_get_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
@@ -1214,7 +1174,7 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
- ctx = efx_find_rss_context_entry(efx, rss_context);
+ ctx = efx_find_rss_context_entry(efx, rxfh->rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@@ -1223,37 +1183,60 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
if (rc)
goto out_unlock;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
- if (key)
- memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, ctx->rx_indir_table,
+ sizeof(ctx->rx_indir_table));
+ if (rxfh->key)
+ memcpy(rxfh->key, ctx->rx_hash_key,
+ efx->type->rx_hash_key_size);
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
}
-int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+int efx_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ int rc;
+
+ if (rxfh->rss_context)
+ return efx_ethtool_get_rxfh_context(net_dev, rxfh);
+
+ rc = efx->type->rx_pull_rss_config(efx);
+ if (rc)
+ return rc;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, efx->rss_context.rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ if (rxfh->key)
+ memcpy(rxfh->key, efx->rss_context.rx_hash_key,
+ efx->type->rx_hash_key_size);
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
+ u32 *rss_context = &rxfh->rss_context;
struct efx_rss_context *ctx;
+ u32 *indir = rxfh->indir;
bool allocated = false;
+ u8 *key = rxfh->key;
int rc;
if (!efx->type->rx_push_rss_context_config)
return -EOPNOTSUPP;
- /* Hash function is Toeplitz, cannot be changed */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
- return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (delete) {
+ if (rxfh->rss_delete) {
/* alloc + delete == Nothing to do */
rc = -EINVAL;
goto out_unlock;
@@ -1276,7 +1259,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
}
}
- if (delete) {
+ if (rxfh->rss_delete) {
/* delete this context */
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc)
@@ -1299,6 +1282,33 @@ out_unlock:
return rc;
}
+int efx_ethtool_set_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ u32 *indir = rxfh->indir;
+ u8 *key = rxfh->key;
+
+ /* Hash function is Toeplitz, cannot be changed */
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (rxfh->rss_context)
+ return efx_ethtool_set_rxfh_context(net_dev, rxfh, extack);
+
+ if (!indir && !key)
+ return 0;
+
+ if (!key)
+ key = efx->rss_context.rx_hash_key;
+ if (!indir)
+ indir = efx->rss_context.rx_indir_table;
+
+ return efx->type->rx_push_rss_config(efx, true, indir, key);
+}
+
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 659491932101..a680e5980213 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -44,16 +44,11 @@ int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev);
-int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc);
+int efx_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh);
int efx_ethtool_set_rxfh(struct net_device *net_dev,
- const u32 *indir, const u8 *key, const u8 hfunc);
-int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context);
-int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete);
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 3976a333f7e3..f4db683b80f7 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -1257,31 +1257,33 @@ static u32 ef4_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
0 : ARRAY_SIZE(efx->rx_indir_table));
}
-static int ef4_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ef4_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ef4_nic *efx = netdev_priv(net_dev);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, efx->rx_indir_table,
+ sizeof(efx->rx_indir_table));
return 0;
}
-static int ef4_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ef4_ethtool_set_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- return efx->type->rx_push_rss_config(efx, true, indir);
+ return efx->type->rx_push_rss_config(efx, true, rxfh->indir);
}
static int ef4_ethtool_get_module_eeprom(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 27d86e90a3bb..f2dd7feb0e0c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1473,7 +1473,7 @@ struct efx_nic_type {
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
- struct hwtstamp_config *init);
+ struct kernel_hwtstamp_config *init);
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index b04fdbb8aece..c3bffbf0ba2b 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -301,7 +301,7 @@ struct efx_ptp_data {
bool reset_required;
struct list_head rxfilters_mcast;
struct list_head rxfilters_ucast;
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
bool enabled;
unsigned int mode;
void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
@@ -1848,7 +1848,7 @@ int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
return 0;
}
-static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+static int efx_ptp_ts_init(struct efx_nic *efx, struct kernel_hwtstamp_config *init)
{
int rc;
@@ -1895,33 +1895,25 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
-int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+int efx_ptp_set_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack __always_unused *extack)
{
- struct hwtstamp_config config;
- int rc;
-
/* Not a PTP enabled port */
if (!efx->ptp_data)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- rc = efx_ptp_ts_init(efx, &config);
- if (rc != 0)
- return rc;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config))
- ? -EFAULT : 0;
+ return efx_ptp_ts_init(efx, config);
}
-int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+int efx_ptp_get_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config)
{
+ /* Not a PTP enabled port */
if (!efx->ptp_data)
return -EOPNOTSUPP;
-
- return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
- sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+ *config = efx->ptp_data->config;
+ return 0;
}
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 7b1ef7002b3f..2f30dbb490d2 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -18,8 +18,11 @@ void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_remove(struct efx_nic *efx);
-int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
-int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_set_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int efx_ptp_get_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config);
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_ptp_get_mode(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
index 8c557f6a183c..59d3a6043379 100644
--- a/drivers/net/ethernet/sfc/siena/efx.c
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -495,11 +495,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct efx_nic *efx = netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
- if (cmd == SIOCSHWTSTAMP)
- return efx_siena_ptp_set_ts_config(efx, ifr);
- if (cmd == SIOCGHWTSTAMP)
- return efx_siena_ptp_get_ts_config(efx, ifr);
-
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400)
@@ -579,6 +574,23 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
return -EOPNOTSUPP;
}
+static int efx_siena_hwtstamp_set(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx_siena_ptp_set_ts_config(efx, config, extack);
+}
+
+static int efx_siena_hwtstamp_get(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx_siena_ptp_get_ts_config(efx, config);
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -594,6 +606,8 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_features_check = efx_siena_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = efx_siena_hwtstamp_set,
+ .ndo_hwtstamp_get = efx_siena_hwtstamp_get,
#ifdef CONFIG_SFC_SIENA_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index e4ec589216c1..14dd3893bdef 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -240,6 +240,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_siena_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -269,8 +270,6 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
.set_rxfh = efx_siena_ethtool_set_rxfh,
- .get_rxfh_context = efx_siena_ethtool_get_rxfh_context,
- .set_rxfh_context = efx_siena_ethtool_set_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_siena_ethtool_get_module_info,
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index f590e87e5a23..5f0a8127e967 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -1164,48 +1164,8 @@ u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int rc;
-
- rc = efx->type->rx_pull_rss_config(efx);
- if (rc)
- return rc;
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, efx->rss_context.rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- if (key)
- memcpy(key, efx->rss_context.rx_hash_key,
- efx->type->rx_hash_key_size);
- return 0;
-}
-
-int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- /* Hash function is Toeplitz, cannot be changed */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
- return -EOPNOTSUPP;
- if (!indir && !key)
- return 0;
-
- if (!key)
- key = efx->rss_context.rx_hash_key;
- if (!indir)
- indir = efx->rss_context.rx_indir_table;
-
- return efx->type->rx_push_rss_config(efx, true, indir, key);
-}
-
-int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_rss_context *ctx;
@@ -1215,7 +1175,7 @@ int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
- ctx = efx_siena_find_rss_context_entry(efx, rss_context);
+ ctx = efx_siena_find_rss_context_entry(efx, rxfh->rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@@ -1224,37 +1184,60 @@ int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
if (rc)
goto out_unlock;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
- if (key)
- memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, ctx->rx_indir_table,
+ sizeof(ctx->rx_indir_table));
+ if (rxfh->key)
+ memcpy(rxfh->key, ctx->rx_hash_key,
+ efx->type->rx_hash_key_size);
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
}
-int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ if (rxfh->rss_context)
+ return efx_siena_ethtool_get_rxfh_context(net_dev, rxfh);
+
+ rc = efx->type->rx_pull_rss_config(efx);
+ if (rc)
+ return rc;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, efx->rss_context.rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ if (rxfh->key)
+ memcpy(rxfh->key, efx->rss_context.rx_hash_key,
+ efx->type->rx_hash_key_size);
+ return 0;
+}
+
+static int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ u32 *rss_context = &rxfh->rss_context;
struct efx_rss_context *ctx;
+ u32 *indir = rxfh->indir;
bool allocated = false;
+ u8 *key = rxfh->key;
int rc;
if (!efx->type->rx_push_rss_context_config)
return -EOPNOTSUPP;
- /* Hash function is Toeplitz, cannot be changed */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
- return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (delete) {
+ if (rxfh->rss_delete) {
/* alloc + delete == Nothing to do */
rc = -EINVAL;
goto out_unlock;
@@ -1277,7 +1260,7 @@ int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
}
}
- if (delete) {
+ if (rxfh->rss_delete) {
/* delete this context */
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc)
@@ -1300,6 +1283,33 @@ out_unlock:
return rc;
}
+int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u32 *indir = rxfh->indir;
+ u8 *key = rxfh->key;
+
+ /* Hash function is Toeplitz, cannot be changed */
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (rxfh->rss_context)
+		return efx_siena_ethtool_set_rxfh_context(net_dev, rxfh, extack);
+
+ if (!indir && !key)
+ return 0;
+
+ if (!key)
+ key = efx->rss_context.rx_hash_key;
+ if (!indir)
+ indir = efx->rss_context.rx_indir_table;
+
+ return efx->type->rx_push_rss_config(efx, true, indir, key);
+}
+
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
index 04b375dc6800..d674bab0f65b 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -41,16 +41,11 @@ int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev);
-int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc);
+int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh);
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
- const u32 *indir, const u8 *key, const u8 hfunc);
-int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context);
-int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete);
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index ff7bbc325952..94152f595acd 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -1424,7 +1424,7 @@ struct efx_nic_type {
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
- struct hwtstamp_config *init);
+ struct kernel_hwtstamp_config *init);
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index 38e666561bcd..4b5e2f0ba350 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -297,7 +297,7 @@ struct efx_ptp_data {
u32 rxfilter_event;
u32 rxfilter_general;
bool rxfilter_installed;
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
bool enabled;
unsigned int mode;
void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
@@ -1762,7 +1762,8 @@ int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
return 0;
}
-static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+static int efx_ptp_ts_init(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *init)
{
int rc;
@@ -1799,33 +1800,26 @@ void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
-int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack __always_unused *extack)
{
- struct hwtstamp_config config;
- int rc;
-
/* Not a PTP enabled port */
if (!efx->ptp_data)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- rc = efx_ptp_ts_init(efx, &config);
- if (rc != 0)
- return rc;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config))
- ? -EFAULT : 0;
+ return efx_ptp_ts_init(efx, config);
}
-int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+int efx_siena_ptp_get_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config)
{
+ /* Not a PTP enabled port */
if (!efx->ptp_data)
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
- sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+ *config = efx->ptp_data->config;
+ return 0;
}
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
index 4172f90e9f6f..6352f84424f6 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.h
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -15,8 +15,11 @@
struct ethtool_ts_info;
void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
-int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
-int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int efx_siena_ptp_get_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config);
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
struct ethtool_ts_info *ts_info);
bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c
index a44c8fa25748..ca33dc08e555 100644
--- a/drivers/net/ethernet/sfc/siena/siena.c
+++ b/drivers/net/ethernet/sfc/siena/siena.c
@@ -136,7 +136,7 @@ static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
}
static int siena_ptp_set_ts_config(struct efx_nic *efx,
- struct hwtstamp_config *init)
+ struct kernel_hwtstamp_config *init)
{
int rc;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 0891e9e49ecb..5ab8b81b84e6 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1302,6 +1302,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = NETSEC_RXBUF_HEADROOM,
.max_len = NETSEC_RX_BUF_SIZE,
+ .napi = &priv->napi,
+ .netdev = priv->ndev,
};
int i, err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 80e598bd4255..26cad4344701 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o \
+ stmmac_xdp.o stmmac_est.o \
$(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e3f650e88f82..5ba606a596e7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -59,28 +59,51 @@
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
+struct stmmac_q_tx_stats {
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_set_ic_bit;
+ u64_stats_t tx_tso_frames;
+ u64_stats_t tx_tso_nfrags;
+};
+
+struct stmmac_napi_tx_stats {
+ u64_stats_t tx_packets;
+ u64_stats_t tx_pkt_n;
+ u64_stats_t poll;
+ u64_stats_t tx_clean;
+ u64_stats_t tx_set_ic_bit;
+};
+
struct stmmac_txq_stats {
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_pkt_n;
- u64 tx_normal_irq_n;
- u64 napi_poll;
- u64 tx_clean;
- u64 tx_set_ic_bit;
- u64 tx_tso_frames;
- u64 tx_tso_nfrags;
- struct u64_stats_sync syncp;
+ /* Updates protected by tx queue lock. */
+ struct u64_stats_sync q_syncp;
+ struct stmmac_q_tx_stats q;
+
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_tx_stats napi;
} ____cacheline_aligned_in_smp;
+struct stmmac_napi_rx_stats {
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_pkt_n;
+ u64_stats_t poll;
+};
+
struct stmmac_rxq_stats {
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_pkt_n;
- u64 rx_normal_irq_n;
- u64 napi_poll;
- struct u64_stats_sync syncp;
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_rx_stats napi;
} ____cacheline_aligned_in_smp;
+/* Updates on each CPU protected by not allowing nested irqs. */
+struct stmmac_pcpu_stats {
+ struct u64_stats_sync syncp;
+	u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
+	u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
+};
+
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -205,6 +228,7 @@ struct stmmac_extra_stats {
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+ struct stmmac_pcpu_stats __percpu *pcpu_stats;
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
@@ -216,6 +240,7 @@ struct stmmac_safety_stats {
unsigned long mac_errors[32];
unsigned long mtl_errors[32];
unsigned long dma_errors[32];
+ unsigned long dma_dpp_errors[32];
};
/* Number of fields in Safety Stats */
@@ -563,6 +588,7 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
+ const struct stmmac_est_ops *est;
struct dw_xpcs *xpcs;
struct phylink_pcs *lynx_pcs; /* Lynx external PCS */
struct mii_regs mii; /* MII register Addresses */
@@ -580,6 +606,7 @@ struct mac_device_info {
u32 vlan_filter[32];
bool vlan_fail_q_en;
u8 vlan_fail_q;
+ bool hw_vlan_en;
};
struct stmmac_rx_routing {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 8f730ada71f9..6b65420e11b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -353,6 +353,10 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
+	/* Keep TX Q0 as the default TSO queue and enable TBS on the remaining TX queues */
+ for (int i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 137741b94122..b21d99faa2d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
u32 v;
@@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c6ff1fa0e04d..6b6d0de09619 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -1134,6 +1134,35 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return 0;
}
+static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
+ struct dma_desc *rx_desc, struct sk_buff *skb)
+{
+ if (hw->desc->get_rx_vlan_valid(rx_desc)) {
+ u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_VLAN_TAG);
+
+ value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
+
+ if (hw->hw_vlan_en)
+ /* Always strip VLAN on Receive */
+ value |= GMAC_VLAN_TAG_STRIP_ALL;
+ else
+ /* Do not strip VLAN on Receive */
+ value |= GMAC_VLAN_TAG_STRIP_NONE;
+
+ /* Enable outer VLAN Tag in Rx DMA descriptor */
+ value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+}
+
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.phylink_get_caps = dwmac4_phylink_get_caps,
@@ -1175,6 +1204,8 @@ const struct stmmac_ops dwmac4_ops = {
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac410_ops = {
@@ -1216,14 +1247,14 @@ const struct stmmac_ops dwmac410_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .est_configure = dwmac5_est_configure,
- .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1269,14 +1300,14 @@ const struct stmmac_ops dwmac510_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .est_configure = dwmac5_est_configure,
- .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 89a14084c611..1c5802e0d7f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -198,6 +198,17 @@ static int dwmac4_get_tx_ls(struct dma_desc *p)
>> TDES3_LAST_DESCRIPTOR_SHIFT;
}
+static u16 dwmac4_wrback_get_rx_vlan_tci(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des0) & RDES0_VLAN_TAG_MASK);
+}
+
+static bool dwmac4_wrback_get_rx_vlan_valid(struct dma_desc *p)
+{
+ return ((le32_to_cpu(p->des3) & RDES3_LAST_DESCRIPTOR) &&
+ (le32_to_cpu(p->des3) & RDES3_RDES0_VALID));
+}
+
static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
@@ -551,6 +562,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_tx_owner = dwmac4_set_tx_owner,
.set_rx_owner = dwmac4_set_rx_owner,
.get_tx_ls = dwmac4_get_tx_ls,
+ .get_rx_vlan_tci = dwmac4_wrback_get_rx_vlan_tci,
+ .get_rx_vlan_valid = dwmac4_wrback_get_rx_vlan_valid,
.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9470d3fd2ded..0d185e54eb7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 8fd167501fa0..e02cebc3f1b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -573,143 +573,6 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
return 0;
}
-static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
-{
- u32 ctrl;
-
- writel(val, ioaddr + MTL_EST_GCL_DATA);
-
- ctrl = (reg << ADDR_SHIFT);
- ctrl |= gcl ? 0 : GCRR;
-
- writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
-
- ctrl |= SRWO;
- writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
-
- return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
- ctrl, !(ctrl & SRWO), 100, 5000);
-}
-
-int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate)
-{
- int i, ret = 0x0;
- u32 ctrl;
-
- ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
- ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
- ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
- ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
- ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
- ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
- if (ret)
- return ret;
-
- for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
- if (ret)
- return ret;
- }
-
- ctrl = readl(ioaddr + MTL_EST_CONTROL);
- ctrl &= ~PTOV;
- ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
- if (cfg->enable)
- ctrl |= EEST | SSWL;
- else
- ctrl &= ~EEST;
-
- writel(ctrl, ioaddr + MTL_EST_CONTROL);
-
- /* Configure EST interrupt */
- if (cfg->enable)
- ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC);
- else
- ctrl = 0;
-
- writel(ctrl, ioaddr + MTL_EST_INT_EN);
-
- return 0;
-}
-
-void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt)
-{
- u32 status, value, feqn, hbfq, hbfs, btrl;
- u32 txqcnt_mask = (1 << txqcnt) - 1;
-
- status = readl(ioaddr + MTL_EST_STATUS);
-
- value = (CGCE | HLBS | HLBF | BTRE | SWLC);
-
- /* Return if there is no error */
- if (!(status & value))
- return;
-
- if (status & CGCE) {
- /* Clear Interrupt */
- writel(CGCE, ioaddr + MTL_EST_STATUS);
-
- x->mtl_est_cgce++;
- }
-
- if (status & HLBS) {
- value = readl(ioaddr + MTL_EST_SCH_ERR);
- value &= txqcnt_mask;
-
- x->mtl_est_hlbs++;
-
- /* Clear Interrupt */
- writel(value, ioaddr + MTL_EST_SCH_ERR);
-
- /* Collecting info to shows all the queues that has HLBS
- * issue. The only way to clear this is to clear the
- * statistic
- */
- if (net_ratelimit())
- netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
- }
-
- if (status & HLBF) {
- value = readl(ioaddr + MTL_EST_FRM_SZ_ERR);
- feqn = value & txqcnt_mask;
-
- value = readl(ioaddr + MTL_EST_FRM_SZ_CAP);
- hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT;
- hbfs = value & SZ_CAP_HBFS_MASK;
-
- x->mtl_est_hlbf++;
-
- /* Clear Interrupt */
- writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR);
-
- if (net_ratelimit())
- netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
- hbfq, hbfs);
- }
-
- if (status & BTRE) {
- if ((status & BTRL) == BTRL_MAX)
- x->mtl_est_btrlm++;
- else
- x->mtl_est_btre++;
-
- btrl = (status & BTRL) >> BTRL_SHIFT;
-
- if (net_ratelimit())
- netdev_info(dev, "EST: BTR Error Loop Count %u\n",
- btrl);
-
- writel(BTRE, ioaddr + MTL_EST_STATUS);
- }
-
- if (status & SWLC) {
- writel(SWLC, ioaddr + MTL_EST_STATUS);
- netdev_info(dev, "EST: SWOL has been switched\n");
- }
-}
-
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 34e620790eb3..bf33a51d229e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -39,53 +39,6 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
-#define MTL_EST_CONTROL 0x00000c50
-#define PTOV GENMASK(31, 24)
-#define PTOV_SHIFT 24
-#define SSWL BIT(1)
-#define EEST BIT(0)
-
-#define MTL_EST_STATUS 0x00000c58
-#define BTRL GENMASK(11, 8)
-#define BTRL_SHIFT 8
-#define BTRL_MAX (0xF << BTRL_SHIFT)
-#define SWOL BIT(7)
-#define SWOL_SHIFT 7
-#define CGCE BIT(4)
-#define HLBS BIT(3)
-#define HLBF BIT(2)
-#define BTRE BIT(1)
-#define SWLC BIT(0)
-
-#define MTL_EST_SCH_ERR 0x00000c60
-#define MTL_EST_FRM_SZ_ERR 0x00000c64
-#define MTL_EST_FRM_SZ_CAP 0x00000c68
-#define SZ_CAP_HBFS_MASK GENMASK(14, 0)
-#define SZ_CAP_HBFQ_SHIFT 16
-#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \
- ((val) > 4 ? GENMASK(18, 16) : \
- (val) > 2 ? GENMASK(17, 16) : \
- BIT(16)); })
-
-#define MTL_EST_INT_EN 0x00000c70
-#define IECGCE CGCE
-#define IEHS HLBS
-#define IEHF HLBF
-#define IEBE BTRE
-#define IECC SWLC
-
-#define MTL_EST_GCL_CONTROL 0x00000c80
-#define BTR_LOW 0x0
-#define BTR_HIGH 0x1
-#define CTR_LOW 0x2
-#define CTR_HIGH 0x3
-#define TER 0x4
-#define LLR 0x5
-#define ADDR_SHIFT 8
-#define GCRR BIT(2)
-#define SRWO BIT(0)
-#define MTL_EST_GCL_DATA 0x00000c84
-
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -149,10 +102,6 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
-int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate);
-void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt);
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 7907d62d3437..85e18f9a22f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a4e8b498dea9..6a2c7d22df1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -284,22 +284,6 @@
#define XGMAC_TC_PRTY_MAP1 0x00001044
#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
-#define XGMAC_MTL_EST_CONTROL 0x00001050
-#define XGMAC_PTOV GENMASK(31, 23)
-#define XGMAC_PTOV_SHIFT 23
-#define XGMAC_SSWL BIT(1)
-#define XGMAC_EEST BIT(0)
-#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080
-#define XGMAC_BTR_LOW 0x0
-#define XGMAC_BTR_HIGH 0x1
-#define XGMAC_CTR_LOW 0x2
-#define XGMAC_CTR_HIGH 0x3
-#define XGMAC_TER 0x4
-#define XGMAC_LLR 0x5
-#define XGMAC_ADDR_SHIFT 8
-#define XGMAC_GCRR BIT(2)
-#define XGMAC_SRWO BIT(0)
-#define XGMAC_MTL_EST_GCL_DATA 0x00001084
#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
#define XGMAC_RXPI BIT(31)
#define XGMAC_NPE GENMASK(23, 16)
@@ -319,6 +303,8 @@
#define XGMAC_RXCEIE BIT(4)
#define XGMAC_TXCEIE BIT(0)
#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
+#define XGMAC_MTL_DPP_CONTROL 0x000010e0
+#define XGMAC_DPP_DISABLE BIT(0)
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -401,6 +387,7 @@
#define XGMAC_DCEIE BIT(1)
#define XGMAC_TCEIE BIT(0)
#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
+#define XGMAC_DMA_DPP_INT_STATUS 0x00003074
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index a74e71db79f9..1af2f89a0504 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
+#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
+#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
+ { true, "TDPES0", DPP_TX_ERR },
+ { true, "TDPES1", DPP_TX_ERR },
+ { true, "TDPES2", DPP_TX_ERR },
+ { true, "TDPES3", DPP_TX_ERR },
+ { true, "TDPES4", DPP_TX_ERR },
+ { true, "TDPES5", DPP_TX_ERR },
+ { true, "TDPES6", DPP_TX_ERR },
+ { true, "TDPES7", DPP_TX_ERR },
+ { true, "TDPES8", DPP_TX_ERR },
+ { true, "TDPES9", DPP_TX_ERR },
+ { true, "TDPES10", DPP_TX_ERR },
+ { true, "TDPES11", DPP_TX_ERR },
+ { true, "TDPES12", DPP_TX_ERR },
+ { true, "TDPES13", DPP_TX_ERR },
+ { true, "TDPES14", DPP_TX_ERR },
+ { true, "TDPES15", DPP_TX_ERR },
+ { true, "RDPES0", DPP_RX_ERR },
+ { true, "RDPES1", DPP_RX_ERR },
+ { true, "RDPES2", DPP_RX_ERR },
+ { true, "RDPES3", DPP_RX_ERR },
+ { true, "RDPES4", DPP_RX_ERR },
+ { true, "RDPES5", DPP_RX_ERR },
+ { true, "RDPES6", DPP_RX_ERR },
+ { true, "RDPES7", DPP_RX_ERR },
+ { true, "RDPES8", DPP_RX_ERR },
+ { true, "RDPES9", DPP_RX_ERR },
+ { true, "RDPES10", DPP_RX_ERR },
+ { true, "RDPES11", DPP_RX_ERR },
+ { true, "RDPES12", DPP_RX_ERR },
+ { true, "RDPES13", DPP_RX_ERR },
+ { true, "RDPES14", DPP_RX_ERR },
+ { true, "RDPES15", DPP_RX_ERR },
+};
+
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
void __iomem *ioaddr, bool correctable,
struct stmmac_safety_stats *stats)
@@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
dwxgmac3_log_error(ndev, value, correctable, "DMA",
dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+
+ value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
+ dwxgmac3_dma_dpp_errors,
+ STAT_OFF(dma_dpp_errors), stats);
}
static int
@@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+ /* 5. Enable Data Path Parity Protection */
+ value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
+	/* already enabled by default, explicitly enable it again */
+ value &= ~XGMAC_DPP_DISABLE;
+ writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
+
return 0;
}
@@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
ret |= !corr;
}
- err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
+ * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
+ * Parity Errors here
+ */
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
corr = dma & XGMAC_DECIS;
if (err) {
dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
@@ -930,6 +985,7 @@ static const struct dwxgmac3_error {
{ dwxgmac3_mac_errors },
{ dwxgmac3_mtl_errors },
{ dwxgmac3_dma_errors },
+ { dwxgmac3_dma_dpp_errors },
};
static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
@@ -1433,57 +1489,6 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
-{
- u32 ctrl;
-
- writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
-
- ctrl = (reg << XGMAC_ADDR_SHIFT);
- ctrl |= gcl ? 0 : XGMAC_GCRR;
-
- writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
-
- ctrl |= XGMAC_SRWO;
- writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
-
- return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
- ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
-}
-
-static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate)
-{
- int i, ret = 0x0;
- u32 ctrl;
-
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
- if (ret)
- return ret;
-
- for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
- if (ret)
- return ret;
- }
-
- ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
- ctrl &= ~XGMAC_PTOV;
- ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
- if (cfg->enable)
- ctrl |= XGMAC_EEST | XGMAC_SSWL;
- else
- ctrl &= ~XGMAC_EEST;
-
- writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
- return 0;
-}
-
static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq,
u32 num_rxq, bool enable)
@@ -1553,7 +1558,6 @@ const struct stmmac_ops dwxgmac210_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .est_configure = dwxgmac3_est_configure,
.fpe_configure = dwxgmac3_fpe_configure,
};
@@ -1615,7 +1619,6 @@ const struct stmmac_ops dwxlgmac2_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .est_configure = dwxgmac3_est_configure,
.fpe_configure = dwxgmac3_fpe_configure,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 3cde695fec91..dd2ab6185c40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index b8ba8f2d8041..29367105df54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -7,6 +7,7 @@
#include "common.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+#include "stmmac_est.h"
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
@@ -114,6 +115,7 @@ static const struct stmmac_hwif_entry {
const void *mode;
const void *tc;
const void *mmc;
+ const void *est;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
@@ -162,6 +164,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
@@ -170,6 +173,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = stmmac_dwmac4_quirks,
}, {
@@ -180,6 +184,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
@@ -188,6 +193,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -198,6 +204,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
@@ -206,6 +213,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -216,6 +224,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
@@ -224,6 +233,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -235,6 +245,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
+ .est_off = EST_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -243,6 +254,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
}, {
@@ -254,6 +266,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
+ .est_off = EST_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -262,6 +275,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
.quirks = stmmac_dwxlgmac_quirks,
},
@@ -296,6 +310,10 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
(needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
priv->mmcaddr = priv->ioaddr +
(needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
+ if (needs_gmac4)
+ priv->estaddr = priv->ioaddr + EST_GMAC4_OFFSET;
+ else if (needs_xgmac)
+ priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET;
/* Check for HW specific setup first */
if (priv->plat->setup) {
@@ -332,10 +350,13 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->mode = mac->mode ? : entry->mode;
mac->tc = mac->tc ? : entry->tc;
mac->mmc = mac->mmc ? : entry->mmc;
+ mac->est = mac->est ? : entry->est;
priv->hw = mac;
priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+ if (entry->est)
+ priv->estaddr = priv->ioaddr + entry->regs.est_off;
/* Entry found */
if (needs_setup) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 68aa2d5ca6e5..7be04b54738b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -56,6 +56,10 @@ struct stmmac_desc_ops {
void (*set_tx_ic)(struct dma_desc *p);
/* Last tx segment reports the transmit status */
int (*get_tx_ls)(struct dma_desc *p);
+ /* Get the Rx VLAN tag from the descriptor */
+ u16 (*get_rx_vlan_tci)(struct dma_desc *p);
+ /* Get the Rx VLAN tag valid status from the descriptor */
+ bool (*get_rx_vlan_valid)(struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
int (*tx_status)(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr);
@@ -117,6 +121,10 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_tx_ic, __args)
#define stmmac_get_tx_ls(__priv, __args...) \
stmmac_do_callback(__priv, desc, get_tx_ls, __args)
+#define stmmac_get_rx_vlan_tci(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_vlan_tci, __args)
+#define stmmac_get_rx_vlan_valid(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_vlan_valid, __args)
#define stmmac_tx_status(__priv, __args...) \
stmmac_do_callback(__priv, desc, tx_status, __args)
#define stmmac_get_tx_len(__priv, __args...) \
@@ -388,6 +396,9 @@ struct stmmac_ops {
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
__le16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
+ struct sk_buff *skb);
+ void (*set_hw_vlan_mode)(struct mac_device_info *hw);
int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
struct mac_device_info *hw,
__be16 proto, u16 vid);
@@ -408,10 +419,6 @@ struct stmmac_ops {
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
- int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate);
- void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt);
void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable);
@@ -499,6 +506,10 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
#define stmmac_enable_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_rx_hw_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
+#define stmmac_set_hw_vlan_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
@@ -515,10 +526,6 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
-#define stmmac_est_configure(__priv, __args...) \
- stmmac_do_callback(__priv, mac, est_configure, __args)
-#define stmmac_est_irq_status(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, est_irq_status, __args)
#define stmmac_fpe_configure(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
#define stmmac_fpe_send_mpacket(__priv, __args...) \
@@ -644,9 +651,22 @@ struct stmmac_mmc_ops {
#define stmmac_mmc_read(__priv, __args...) \
stmmac_do_void_callback(__priv, mmc, read, __args)
+struct stmmac_est_ops {
+ int (*configure)(struct stmmac_priv *priv, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void (*irq_status)(struct stmmac_priv *priv, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+};
+
+#define stmmac_est_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, est, configure, __args)
+#define stmmac_est_irq_status(__priv, __args...) \
+ stmmac_do_void_callback(__priv, est, irq_status, __args)
+
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
+ u32 est_off;
};
extern const struct stmmac_ops dwmac100_ops;
@@ -665,6 +685,7 @@ extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
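The new stmmac_est_configure()/stmmac_est_irq_status() wrappers dispatch through the per-core est ops table selected in hwif.c, like the other stmmac_do_*callback macros. A minimal sketch of how such a call resolves; the -EINVAL "not implemented" default mirrors the other callback wrappers and is an assumption, since the macro body is not shown in this hunk:

/* Illustrative expansion of stmmac_est_configure(priv, priv, cfg, ptp_rate) */
static int example_est_configure(struct stmmac_priv *priv,
				 struct stmmac_est *cfg,
				 unsigned int ptp_rate)
{
	/* priv->hw->est points at dwmac510_est_ops when EST is present */
	if (!priv->hw->est || !priv->hw->est->configure)
		return -EINVAL;

	return priv->hw->est->configure(priv, cfg, ptp_rate);
}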
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index a0c05925883e..14c9d2637dfe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -52,6 +52,8 @@ struct stmmac_counters {
unsigned int mmc_tx_excessdef;
unsigned int mmc_tx_pause_frame;
unsigned int mmc_tx_vlan_frame_g;
+ unsigned int mmc_tx_lpi_usec;
+ unsigned int mmc_tx_lpi_tran;
/* MMC RX counter registers */
unsigned int mmc_rx_framecount_gb;
@@ -78,9 +80,16 @@ struct stmmac_counters {
unsigned int mmc_rx_fifo_overflow;
unsigned int mmc_rx_vlan_frames_gb;
unsigned int mmc_rx_watchdog_error;
+ unsigned int mmc_rx_lpi_usec;
+ unsigned int mmc_rx_lpi_tran;
+ unsigned int mmc_rx_discard_frames_gb;
+ unsigned int mmc_rx_discard_octets_gb;
+ unsigned int mmc_rx_align_err_frames;
+
/* IPC */
unsigned int mmc_rx_ipc_intr_mask;
unsigned int mmc_rx_ipc_intr;
+
/* IPv4 */
unsigned int mmc_rx_ipv4_gd;
unsigned int mmc_rx_ipv4_hderr;
@@ -118,9 +127,14 @@ struct stmmac_counters {
unsigned int mmc_rx_icmp_gd_octets;
unsigned int mmc_rx_icmp_err_octets;
+ /* Stream-Gate Filter */
+ unsigned int mmc_sgf_pass_fragment_cntr;
+ unsigned int mmc_sgf_fail_fragment_cntr;
+
/* FPE */
unsigned int mmc_tx_fpe_fragment_cntr;
unsigned int mmc_tx_hold_req_cntr;
+ unsigned int mmc_tx_gate_overrun_cntr;
unsigned int mmc_rx_packet_assembly_err_cntr;
unsigned int mmc_rx_packet_smd_err_cntr;
unsigned int mmc_rx_packet_assembly_ok_cntr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 6a7c1d325c46..8597c6abae8d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -177,9 +177,12 @@
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+#define MMC_XGMAC_SGF_PASS_PKT 0x1f0
+#define MMC_XGMAC_SGF_FAIL_PKT 0x1f4
#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_TX_GATE_OVERRUN 0x210
#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
@@ -187,6 +190,40 @@
#define MMC_XGMAC_RX_FPE_FRAG 0x234
#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c
+#define MMC_XGMAC_RX_IPV4_GD 0x264
+#define MMC_XGMAC_RX_IPV4_HDERR 0x26c
+#define MMC_XGMAC_RX_IPV4_NOPAY 0x274
+#define MMC_XGMAC_RX_IPV4_FRAG 0x27c
+#define MMC_XGMAC_RX_IPV4_UDSBL 0x284
+
+#define MMC_XGMAC_RX_IPV6_GD 0x28c
+#define MMC_XGMAC_RX_IPV6_HDERR 0x294
+#define MMC_XGMAC_RX_IPV6_NOPAY 0x29c
+
+#define MMC_XGMAC_RX_UDP_GD 0x2a4
+#define MMC_XGMAC_RX_UDP_ERR 0x2ac
+#define MMC_XGMAC_RX_TCP_GD 0x2b4
+#define MMC_XGMAC_RX_TCP_ERR 0x2bc
+#define MMC_XGMAC_RX_ICMP_GD 0x2c4
+#define MMC_XGMAC_RX_ICMP_ERR 0x2cc
+
+#define MMC_XGMAC_RX_IPV4_GD_OCTETS 0x2d4
+#define MMC_XGMAC_RX_IPV4_HDERR_OCTETS 0x2dc
+#define MMC_XGMAC_RX_IPV4_NOPAY_OCTETS 0x2e4
+#define MMC_XGMAC_RX_IPV4_FRAG_OCTETS 0x2ec
+#define MMC_XGMAC_RX_IPV4_UDSBL_OCTETS 0x2f4
+
+#define MMC_XGMAC_RX_IPV6_GD_OCTETS 0x2fc
+#define MMC_XGMAC_RX_IPV6_HDERR_OCTETS 0x304
+#define MMC_XGMAC_RX_IPV6_NOPAY_OCTETS 0x30c
+
+#define MMC_XGMAC_RX_UDP_GD_OCTETS 0x314
+#define MMC_XGMAC_RX_UDP_ERR_OCTETS 0x31c
+#define MMC_XGMAC_RX_TCP_GD_OCTETS 0x324
+#define MMC_XGMAC_RX_TCP_ERR_OCTETS 0x32c
+#define MMC_XGMAC_RX_ICMP_GD_OCTETS 0x334
+#define MMC_XGMAC_RX_ICMP_ERR_OCTETS 0x33c
+
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -414,6 +451,8 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
&mmc->mmc_tx_pause_frame);
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
&mmc->mmc_tx_vlan_frame_g);
+ mmc->mmc_tx_lpi_usec += readl(mmcaddr + MMC_XGMAC_TX_LPI_USEC);
+ mmc->mmc_tx_lpi_tran += readl(mmcaddr + MMC_XGMAC_TX_LPI_TRAN);
/* MMC RX counter registers */
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
@@ -459,9 +498,23 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
&mmc->mmc_rx_vlan_frames_gb);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
-
+ mmc->mmc_rx_lpi_usec += readl(mmcaddr + MMC_XGMAC_RX_LPI_USEC);
+ mmc->mmc_rx_lpi_tran += readl(mmcaddr + MMC_XGMAC_RX_LPI_TRAN);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_DISCARD_PKT_GB,
+ &mmc->mmc_rx_discard_frames_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_DISCARD_OCT_GB,
+ &mmc->mmc_rx_discard_octets_gb);
+ mmc->mmc_rx_align_err_frames +=
+ readl(mmcaddr + MMC_XGMAC_RX_ALIGN_ERR_PKT);
+
+ mmc->mmc_sgf_pass_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_SGF_PASS_PKT);
+ mmc->mmc_sgf_fail_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_SGF_FAIL_PKT);
mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_GATE_OVERRUN,
+ &mmc->mmc_tx_gate_overrun_cntr);
mmc->mmc_rx_packet_assembly_err_cntr +=
readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
mmc->mmc_rx_packet_smd_err_cntr +=
@@ -470,6 +523,68 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
mmc->mmc_rx_fpe_fragment_cntr +=
readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_GD,
+ &mmc->mmc_rx_ipv4_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_HDERR,
+ &mmc->mmc_rx_ipv4_hderr);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_NOPAY,
+ &mmc->mmc_rx_ipv4_nopay);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_FRAG,
+ &mmc->mmc_rx_ipv4_frag);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_UDSBL,
+ &mmc->mmc_rx_ipv4_udsbl);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_GD,
+ &mmc->mmc_rx_ipv6_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_HDERR,
+ &mmc->mmc_rx_ipv6_hderr);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_NOPAY,
+ &mmc->mmc_rx_ipv6_nopay);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_GD,
+ &mmc->mmc_rx_udp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_ERR,
+ &mmc->mmc_rx_udp_err);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_GD,
+ &mmc->mmc_rx_tcp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_ERR,
+ &mmc->mmc_rx_tcp_err);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_GD,
+ &mmc->mmc_rx_icmp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_ERR,
+ &mmc->mmc_rx_icmp_err);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_GD_OCTETS,
+ &mmc->mmc_rx_ipv4_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_HDERR_OCTETS,
+ &mmc->mmc_rx_ipv4_hderr_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_NOPAY_OCTETS,
+ &mmc->mmc_rx_ipv4_nopay_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_FRAG_OCTETS,
+ &mmc->mmc_rx_ipv4_frag_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_UDSBL_OCTETS,
+ &mmc->mmc_rx_ipv4_udsbl_octets);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_GD_OCTETS,
+ &mmc->mmc_rx_ipv6_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_HDERR_OCTETS,
+ &mmc->mmc_rx_ipv6_hderr_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_NOPAY_OCTETS,
+ &mmc->mmc_rx_ipv6_nopay_octets);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_GD_OCTETS,
+ &mmc->mmc_rx_udp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_ERR_OCTETS,
+ &mmc->mmc_rx_udp_err_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_GD_OCTETS,
+ &mmc->mmc_rx_tcp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_ERR_OCTETS,
+ &mmc->mmc_rx_tcp_err_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_GD_OCTETS,
+ &mmc->mmc_rx_icmp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_ERR_OCTETS,
+ &mmc->mmc_rx_icmp_err_octets);
}
const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
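Each counter added in this hunk follows the same accumulate-on-read pattern, either open-coded with readl() or via the dwxgmac_read_mmc_reg() helper defined earlier in mmc_core.c (assumed here to accumulate in the same way; its body is not part of this hunk), so the totals exported through ethtool keep growing across successive reads. A minimal sketch of that pattern:

/* Illustrative only: accumulate a hardware MMC register into the
 * persistent software counter, as the reads above do.
 */
static void example_accumulate(void __iomem *mmcaddr,
			       struct stmmac_counters *mmc)
{
	mmc->mmc_sgf_pass_fragment_cntr +=
		readl(mmcaddr + MMC_XGMAC_SGF_PASS_PKT);
	mmc->mmc_sgf_fail_fragment_cntr +=
		readl(mmcaddr + MMC_XGMAC_SGF_FAIL_PKT);
}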
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index cd7a9768de5f..f155e4841c62 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -51,6 +51,7 @@ struct stmmac_tx_info {
bool last_segment;
bool is_jumbo;
enum stmmac_txbuf_type buf_type;
+ struct xsk_tx_metadata_compl xsk_meta;
};
#define STMMAC_TBS_AVAIL BIT(0)
@@ -100,6 +101,17 @@ struct stmmac_xdp_buff {
struct dma_desc *ndesc;
};
+struct stmmac_metadata_request {
+ struct stmmac_priv *priv;
+ struct dma_desc *tx_desc;
+ bool *set_ic;
+};
+
+struct stmmac_xsk_tx_complete {
+ struct stmmac_priv *priv;
+ struct dma_desc *desc;
+};
+
struct stmmac_rx_queue {
u32 rx_count_frames;
u32 queue_index;
@@ -255,6 +267,7 @@ struct stmmac_priv {
u32 msg_enable;
int wolopts;
int wol_irq;
+ bool wol_irq_disabled;
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
@@ -283,6 +296,7 @@ struct stmmac_priv {
void __iomem *mmcaddr;
void __iomem *ptpaddr;
+ void __iomem *estaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
int sfty_ce_irq;
int sfty_ue_irq;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
new file mode 100644
index 000000000000..4da6ccc17c20
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, Intel Corporation
+ * stmmac EST (802.1Qbv) handling
+ */
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include "stmmac.h"
+#include "stmmac_est.h"
+
+static int est_write(void __iomem *est_addr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, est_addr + EST_GCL_DATA);
+
+ ctrl = (reg << EST_ADDR_SHIFT);
+ ctrl |= gcl ? 0 : EST_GCRR;
+ writel(ctrl, est_addr + EST_GCL_CONTROL);
+
+ ctrl |= EST_SRWO;
+ writel(ctrl, est_addr + EST_GCL_CONTROL);
+
+ return readl_poll_timeout(est_addr + EST_GCL_CONTROL, ctrl,
+ !(ctrl & EST_SRWO), 100, 5000);
+}
+
+static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ void __iomem *est_addr = priv->estaddr;
+ int i, ret = 0;
+ u32 ctrl;
+
+ ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
+ ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
+ ret |= est_write(est_addr, EST_TER, cfg->ter, false);
+ ret |= est_write(est_addr, EST_LLR, cfg->gcl_size, false);
+ ret |= est_write(est_addr, EST_CTR_LOW, cfg->ctr[0], false);
+ ret |= est_write(est_addr, EST_CTR_HIGH, cfg->ctr[1], false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = est_write(est_addr, i, cfg->gcl[i], true);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = readl(est_addr + EST_CONTROL);
+ if (priv->plat->has_xgmac) {
+ ctrl &= ~EST_XGMAC_PTOV;
+ ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) <<
+ EST_XGMAC_PTOV_SHIFT;
+ } else {
+ ctrl &= ~EST_GMAC5_PTOV;
+ ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_GMAC5_PTOV_MUL) <<
+ EST_GMAC5_PTOV_SHIFT;
+ }
+ if (cfg->enable)
+ ctrl |= EST_EEST | EST_SSWL;
+ else
+ ctrl &= ~EST_EEST;
+
+ writel(ctrl, est_addr + EST_CONTROL);
+
+ /* Configure EST interrupt */
+ if (cfg->enable)
+ ctrl = EST_IECGCE | EST_IEHS | EST_IEHF | EST_IEBE | EST_IECC;
+ else
+ ctrl = 0;
+
+ writel(ctrl, est_addr + EST_INT_EN);
+
+ return 0;
+}
+
+static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt)
+{
+ u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
+ void __iomem *est_addr = priv->estaddr;
+ u32 txqcnt_mask = BIT(txqcnt) - 1;
+
+ status = readl(est_addr + EST_STATUS);
+
+ value = EST_CGCE | EST_HLBS | EST_HLBF | EST_BTRE | EST_SWLC;
+
+ /* Return if there is no error */
+ if (!(status & value))
+ return;
+
+ if (status & EST_CGCE) {
+ /* Clear Interrupt */
+ writel(EST_CGCE, est_addr + EST_STATUS);
+
+ x->mtl_est_cgce++;
+ }
+
+ if (status & EST_HLBS) {
+ value = readl(est_addr + EST_SCH_ERR);
+ value &= txqcnt_mask;
+
+ x->mtl_est_hlbs++;
+
+ /* Clear Interrupt */
+ writel(value, est_addr + EST_SCH_ERR);
+
+ /* Collect info to show all the queues that have the HLBS
+ * issue. The only way to clear this is to clear the
+ * statistic.
+ */
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
+ }
+
+ if (status & EST_HLBF) {
+ value = readl(est_addr + EST_FRM_SZ_ERR);
+ feqn = value & txqcnt_mask;
+
+ value = readl(est_addr + EST_FRM_SZ_CAP);
+ hbfq = (value & EST_SZ_CAP_HBFQ_MASK(txqcnt)) >>
+ EST_SZ_CAP_HBFQ_SHIFT;
+ hbfs = value & EST_SZ_CAP_HBFS_MASK;
+
+ x->mtl_est_hlbf++;
+
+ /* Clear Interrupt */
+ writel(feqn, est_addr + EST_FRM_SZ_ERR);
+
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
+ hbfq, hbfs);
+ }
+
+ if (status & EST_BTRE) {
+ if (priv->plat->has_xgmac) {
+ btrl = FIELD_GET(EST_XGMAC_BTRL, status);
+ btrl_max = FIELD_MAX(EST_XGMAC_BTRL);
+ } else {
+ btrl = FIELD_GET(EST_GMAC5_BTRL, status);
+ btrl_max = FIELD_MAX(EST_GMAC5_BTRL);
+ }
+ if (btrl == btrl_max)
+ x->mtl_est_btrlm++;
+ else
+ x->mtl_est_btre++;
+
+ if (net_ratelimit())
+ netdev_info(dev, "EST: BTR Error Loop Count %u\n",
+ btrl);
+
+ writel(EST_BTRE, est_addr + EST_STATUS);
+ }
+
+ if (status & EST_SWLC) {
+ writel(EST_SWLC, est_addr + EST_STATUS);
+ netdev_info(dev, "EST: SWOL has been switched\n");
+ }
+}
+
+const struct stmmac_est_ops dwmac510_est_ops = {
+ .configure = est_configure,
+ .irq_status = est_irq_status,
+};
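est_configure() above derives the EST_CONTROL PTOV field from the PTP clock period (NSEC_PER_SEC / ptp_rate) times a core-specific multiplier. A worked example, illustrative only; the 250 MHz clock rate is merely an assumed value for the arithmetic:

/* PTOV for an assumed 250 MHz PTP clock, i.e. a 4 ns clock period */
static u32 example_gmac5_ptov(void)
{
	unsigned int ptp_rate = 250000000;		/* assumed rate */
	u32 period_ns = NSEC_PER_SEC / ptp_rate;	/* 4 ns */

	/* 4 * EST_GMAC5_PTOV_MUL (6) = 24, placed at bits 31:24 */
	return (period_ns * EST_GMAC5_PTOV_MUL) << EST_GMAC5_PTOV_SHIFT;
}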
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
new file mode 100644
index 000000000000..7a858c566e7e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, Intel Corporation
+ * stmmac EST (802.1Qbv) handling
+ */
+
+#define EST_GMAC4_OFFSET 0x00000c50
+#define EST_XGMAC_OFFSET 0x00001050
+
+#define EST_CONTROL 0x00000000
+#define EST_GMAC5_PTOV GENMASK(31, 24)
+#define EST_GMAC5_PTOV_SHIFT 24
+#define EST_GMAC5_PTOV_MUL 6
+#define EST_XGMAC_PTOV GENMASK(31, 23)
+#define EST_XGMAC_PTOV_SHIFT 23
+#define EST_XGMAC_PTOV_MUL 9
+#define EST_SSWL BIT(1)
+#define EST_EEST BIT(0)
+
+#define EST_STATUS 0x00000008
+#define EST_GMAC5_BTRL GENMASK(11, 8)
+#define EST_XGMAC_BTRL GENMASK(15, 8)
+#define EST_SWOL BIT(7)
+#define EST_SWOL_SHIFT 7
+#define EST_CGCE BIT(4)
+#define EST_HLBS BIT(3)
+#define EST_HLBF BIT(2)
+#define EST_BTRE BIT(1)
+#define EST_SWLC BIT(0)
+
+#define EST_SCH_ERR 0x00000010
+
+#define EST_FRM_SZ_ERR 0x00000014
+
+#define EST_FRM_SZ_CAP 0x00000018
+#define EST_SZ_CAP_HBFS_MASK GENMASK(14, 0)
+#define EST_SZ_CAP_HBFQ_SHIFT 16
+#define EST_SZ_CAP_HBFQ_MASK(val) \
+ ({ \
+ typeof(val) _val = (val); \
+ (_val > 4 ? GENMASK(18, 16) : \
+ _val > 2 ? GENMASK(17, 16) : \
+ BIT(16)); \
+ })
+
+#define EST_INT_EN 0x00000020
+#define EST_IECGCE EST_CGCE
+#define EST_IEHS EST_HLBS
+#define EST_IEHF EST_HLBF
+#define EST_IEBE EST_BTRE
+#define EST_IECC EST_SWLC
+
+#define EST_GCL_CONTROL 0x00000030
+#define EST_BTR_LOW 0x0
+#define EST_BTR_HIGH 0x1
+#define EST_CTR_LOW 0x2
+#define EST_CTR_HIGH 0x3
+#define EST_TER 0x4
+#define EST_LLR 0x5
+#define EST_ADDR_SHIFT 8
+#define EST_GCRR BIT(2)
+#define EST_SRWO BIT(0)
+
+#define EST_GCL_DATA 0x00000034
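The EST_SZ_CAP_HBFQ_MASK() helper above widens the queue-number field of EST_FRM_SZ_CAP with the number of TX queues in use, which is how est_irq_status() recovers the offending queue on an HLBF event. A short illustrative sketch of the selection (not part of the patch):

/* Queue-number extraction from EST_FRM_SZ_CAP, per the macro above */
static u32 example_hbfq(u32 frm_sz_cap, u32 txqcnt)
{
	/* txqcnt > 4 -> GENMASK(18, 16), 3 bits (queues 0..7)
	 * txqcnt > 2 -> GENMASK(17, 16), 2 bits (queues 0..3)
	 * else       -> BIT(16),         1 bit  (queues 0..1)
	 */
	return (frm_sz_cap & EST_SZ_CAP_HBFQ_MASK(txqcnt)) >>
	       EST_SZ_CAP_HBFQ_SHIFT;
}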
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f628411ae4ae..ec44becf0e2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -212,6 +212,8 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_excessdef),
STMMAC_MMC_STAT(mmc_tx_pause_frame),
STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_tx_lpi_usec),
+ STMMAC_MMC_STAT(mmc_tx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_framecount_gb),
STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
STMMAC_MMC_STAT(mmc_rx_octetcount_g),
@@ -236,6 +238,11 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_lpi_usec),
+ STMMAC_MMC_STAT(mmc_rx_lpi_tran),
+ STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
+ STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_align_err_frames),
STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
STMMAC_MMC_STAT(mmc_rx_ipc_intr),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
@@ -266,8 +273,11 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+ STMMAC_MMC_STAT(mmc_sgf_pass_fragment_cntr),
+ STMMAC_MMC_STAT(mmc_sgf_fail_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
+ STMMAC_MMC_STAT(mmc_tx_gate_overrun_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
@@ -311,8 +321,9 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII) {
+ if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
+ (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII)) {
struct rgmii_adv adv;
u32 supported, advertising, lp_advertising;
@@ -397,8 +408,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII) {
+ if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
+ (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII)) {
/* Only support ANE */
if (cmd->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
@@ -537,49 +549,79 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
}
+static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
+static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int start;
- int q, stat;
- u64 *pos;
- char *p;
+ int q;
- pos = data;
for (q = 0; q < tx_cnt; q++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
- struct stmmac_txq_stats snapshot;
+ u64 pkt_n;
- data = pos;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
- for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- *data++ += (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_tx_normal_irq_n(priv, q);
}
- pos = data;
for (q = 0; q < rx_cnt; q++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
- struct stmmac_rxq_stats snapshot;
+ u64 pkt_n;
- data = pos;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
- for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- *data++ += (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_rx_normal_irq_n(priv, q);
}
}
@@ -638,39 +680,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < rx_queues_count; i++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
- struct stmmac_rxq_stats snapshot;
+ struct stmmac_napi_rx_stats snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
-
- data[j++] += snapshot.rx_pkt_n;
- data[j++] += snapshot.rx_normal_irq_n;
- normal_irq_n += snapshot.rx_normal_irq_n;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ snapshot = rxq_stats->napi;
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
+ n_irq = stmmac_get_rx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ napi_poll += u64_stats_read(&snapshot.poll);
}
pos = j;
for (i = 0; i < tx_queues_count; i++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
- struct stmmac_txq_stats snapshot;
+ struct stmmac_napi_tx_stats napi_snapshot;
+ struct stmmac_q_tx_stats q_snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
-
- data[j++] += snapshot.tx_pkt_n;
- data[j++] += snapshot.tx_normal_irq_n;
- normal_irq_n += snapshot.tx_normal_irq_n;
- data[j++] += snapshot.tx_clean;
- data[j++] += snapshot.tx_set_ic_bit;
- data[j++] += snapshot.tx_tso_frames;
- data[j++] += snapshot.tx_tso_nfrags;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ q_snapshot = txq_stats->q;
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ napi_snapshot = txq_stats->napi;
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
+ n_irq = stmmac_get_tx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
+ data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
+ u64_stats_read(&napi_snapshot.tx_set_ic_bit);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
+ napi_poll += u64_stats_read(&napi_snapshot.poll);
}
normal_irq_n += priv->xstats.rx_early_irq;
data[j++] = normal_irq_n;
@@ -825,10 +877,16 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts) {
pr_info("stmmac: wakeup enable\n");
device_set_wakeup_enable(priv->device, 1);
- enable_irq_wake(priv->wol_irq);
+ /* Avoid unbalanced enable_irq_wake calls */
+ if (priv->wol_irq_disabled)
+ enable_irq_wake(priv->wol_irq);
+ priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(priv->device, 0);
- disable_irq_wake(priv->wol_irq);
+ /* Avoid unbalanced disable_irq_wake calls */
+ if (!priv->wol_irq_disabled)
+ disable_irq_wake(priv->wol_irq);
+ priv->wol_irq_disabled = true;
}
mutex_lock(&priv->lock);
@@ -1077,41 +1135,42 @@ static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
return ARRAY_SIZE(priv->rss.table);
}
-static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int stmmac_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
- indir[i] = priv->rss.table[i];
+ rxfh->indir[i] = priv->rss.table[i];
}
- if (key)
- memcpy(key, priv->rss.key, sizeof(priv->rss.key));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->key)
+ memcpy(rxfh->key, priv->rss.key, sizeof(priv->rss.key));
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int stmmac_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
- priv->rss.table[i] = indir[i];
+ priv->rss.table[i] = rxfh->indir[i];
}
- if (key)
- memcpy(priv->rss.key, key, sizeof(priv->rss.key));
+ if (rxfh->key)
+ memcpy(priv->rss.key, rxfh->key, sizeof(priv->rss.key));
return stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 37e64283f910..7c6aef033a45 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2431,6 +2431,46 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
}
+static void stmmac_xsk_request_timestamp(void *_priv)
+{
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
+ *meta_req->set_ic = true;
+}
+
+static u64 stmmac_xsk_fill_timestamp(void *_priv)
+{
+ struct stmmac_xsk_tx_complete *tx_compl = _priv;
+ struct stmmac_priv *priv = tx_compl->priv;
+ struct dma_desc *desc = tx_compl->desc;
+ bool found = false;
+ u64 ns = 0;
+
+ if (!priv->hwts_tx_en)
+ return 0;
+
+ /* check tx tstamp status */
+ if (stmmac_get_tx_timestamp_status(priv, desc)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+
+ if (found) {
+ ns -= priv->plat->cdc_error_adj;
+ return ns_to_ktime(ns);
+ }
+
+ return 0;
+}
+
+static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
+ .tmo_request_timestamp = stmmac_xsk_request_timestamp,
+ .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+};
+
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
@@ -2442,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct xdp_desc xdp_desc;
bool work_done = true;
u32 tx_set_ic_bit = 0;
- unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
@@ -2450,6 +2489,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
budget = min(budget, stmmac_tx_avail(priv, queue));
while (budget-- > 0) {
+ struct stmmac_metadata_request meta_req;
+ struct xsk_tx_metadata *meta = NULL;
dma_addr_t dma_addr;
bool set_ic;
@@ -2473,6 +2514,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_desc = tx_q->dma_tx + entry;
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
@@ -2500,6 +2542,11 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
else
set_ic = false;
+ meta_req.priv = priv;
+ meta_req.tx_desc = tx_desc;
+ meta_req.set_ic = &set_ic;
+ xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
+ &meta_req);
if (set_ic) {
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
@@ -2512,12 +2559,15 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ xsk_tx_metadata_to_compl(meta,
+ &tx_q->tx_skbuff_dma[entry].xsk_meta);
+
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->napi_syncp);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2565,7 +2615,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
- unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -2621,8 +2670,19 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
} else {
tx_packets++;
}
- if (skb)
+ if (skb) {
stmmac_get_tx_hwtstamp(priv, p, skb);
+ } else if (tx_q->xsk_pool &&
+ xp_tx_metadata_enabled(tx_q->xsk_pool)) {
+ struct stmmac_xsk_tx_complete tx_compl = {
+ .priv = priv,
+ .desc = p,
+ };
+
+ xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
+ &stmmac_xsk_tx_metadata_ops,
+ &tx_compl);
+ }
}
if (likely(tx_q->tx_skbuff_dma[entry].buf &&
@@ -2721,11 +2781,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
if (tx_q->dirty_tx != tx_q->cur_tx)
*pending_packets = true;
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_packets += tx_packets;
- txq_stats->tx_pkt_n += tx_packets;
- txq_stats->tx_clean++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
+ u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
+ u64_stats_inc(&txq_stats->napi.tx_clean);
+ u64_stats_update_end(&txq_stats->napi_syncp);
priv->xstats.tx_errors += tx_errors;
@@ -3470,6 +3530,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ stmmac_set_hw_vlan_mode(priv, priv->hw);
+
if (priv->dma_cap.fpesel) {
stmmac_fpe_start_wq(priv);
@@ -3565,6 +3627,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
+ priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
int_name = priv->int_name_wol;
sprintf(int_name, "%s:%s", dev->name, "wol");
@@ -3868,6 +3931,9 @@ static int __stmmac_open(struct net_device *dev,
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
buf_sz = dma_conf->dma_buf_sz;
+ for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
+ dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
stmmac_reset_queues_param(priv);
@@ -3940,8 +4006,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
- if (priv->fpe_wq)
+ if (priv->fpe_wq) {
destroy_workqueue(priv->fpe_wq);
+ priv->fpe_wq = NULL;
+ }
netdev_info(priv->dev, "FPE workqueue stop");
}
@@ -4146,7 +4214,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
- unsigned long flags;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -4311,13 +4378,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
- txq_stats->tx_tso_frames++;
- txq_stats->tx_tso_nfrags += nfrags;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+ u64_stats_inc(&txq_stats->q.tx_tso_frames);
+ u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4372,6 +4439,28 @@ dma_map_err:
}
/**
+ * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
+ * @skb: socket buffer to check
+ *
+ * Check if a packet has an ethertype that will trigger the IP header checks
+ * and IP/TCP checksum engine of the stmmac core.
+ *
+ * Return: true if the ethertype can trigger the checksum engine, false
+ * otherwise
+ */
+static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
+{
+ int depth = 0;
+ __be16 proto;
+
+ proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
+ &depth);
+
+ return (depth <= ETH_HLEN) &&
+ (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
+}
+
+/**
* stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
@@ -4394,7 +4483,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
- unsigned long flags;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4435,9 +4523,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* DWMAC IPs can be synthesized to support tx coe only for a few tx
* queues. In that case, checksum offloading for those queues that don't
* support tx coe needs to fallback to software checksum calculation.
+ *
+ * Packets that won't trigger the COE, e.g. most DSA-tagged packets, will
+ * also have to be checksummed in software.
*/
if (csum_insertion &&
- priv->plat->tx_queues_cfg[queue].coe_unsupported) {
+ (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
+ !stmmac_has_ip_ethertype(skb))) {
if (unlikely(skb_checksum_help(skb)))
goto dma_map_err;
csum_insertion = !csum_insertion;
@@ -4560,11 +4652,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4828,12 +4920,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
- unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -4983,7 +5074,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
- unsigned long flags;
struct sk_buff *skb;
u32 hash;
@@ -4994,10 +5084,15 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
}
stmmac_get_rx_hwtstamp(priv, p, np, skb);
- stmmac_rx_vlan(priv->dev, skb);
+ if (priv->hw->hw_vlan_en)
+ /* MAC level stripping. */
+ stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
+ else
+ /* Driver level stripping. */
+ stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5008,10 +5103,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n++;
- rxq_stats->rx_bytes += len;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, len);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5093,7 +5188,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
- unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5248,9 +5342,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5288,7 +5382,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
- unsigned long flags;
int xdp_status = 0;
int buf_sz;
@@ -5510,10 +5603,17 @@ drain_data:
/* Got entire packet into SKB. Finish it. */
stmmac_get_rx_hwtstamp(priv, p, np, skb);
- stmmac_rx_vlan(priv->dev, skb);
+
+ if (priv->hw->hw_vlan_en)
+ /* MAC level stripping. */
+ stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
+ else
+ /* Driver level stripping. */
+ stmmac_rx_vlan(priv->dev, skb);
+
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5541,11 +5641,11 @@ drain_data:
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_packets += rx_packets;
- rxq_stats->rx_bytes += rx_bytes;
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5560,13 +5660,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_priv *priv = ch->priv_data;
struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5588,13 +5687,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_txq_stats *txq_stats;
bool pending_packets = false;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
work_done = min(work_done, budget);
@@ -5624,17 +5722,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_rxq_stats *rxq_stats;
struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
- unsigned long flags;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
tx_done = min(tx_done, budget);
@@ -5819,6 +5916,13 @@ static int stmmac_set_features(struct net_device *netdev,
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
}
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ priv->hw->hw_vlan_en = true;
+ else
+ priv->hw->hw_vlan_en = false;
+
+ stmmac_set_hw_vlan_mode(priv, priv->hw);
+
return 0;
}
@@ -5880,7 +5984,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
pm_wakeup_event(priv->device, 0);
if (priv->dma_cap.estsel)
- stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
+ stmmac_est_irq_status(priv, priv, priv->dev,
&priv->xstats, tx_cnt);
if (priv->dma_cap.fpesel) {
@@ -5958,11 +6062,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -5978,11 +6077,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6004,11 +6098,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6035,11 +6124,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6182,30 +6266,23 @@ static struct dentry *stmmac_fs_dir;
static void sysfs_display_ring(void *head, int size, int extend_desc,
struct seq_file *seq, dma_addr_t dma_phy_addr)
{
- int i;
struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
struct dma_desc *p = (struct dma_desc *)head;
+ unsigned int desc_size;
dma_addr_t dma_addr;
+ int i;
+ desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
for (i = 0; i < size; i++) {
- if (extend_desc) {
- dma_addr = dma_phy_addr + i * sizeof(*ep);
- seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
- i, &dma_addr,
- le32_to_cpu(ep->basic.des0),
- le32_to_cpu(ep->basic.des1),
- le32_to_cpu(ep->basic.des2),
- le32_to_cpu(ep->basic.des3));
- ep++;
- } else {
- dma_addr = dma_phy_addr + i * sizeof(*p);
- seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
- i, &dma_addr,
- le32_to_cpu(p->des0), le32_to_cpu(p->des1),
- le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ dma_addr = dma_phy_addr + i * desc_size;
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ if (extend_desc)
+ p = &(++ep)->basic;
+ else
p++;
- }
- seq_printf(seq, "\n");
}
}
@@ -6960,10 +7037,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 tx_bytes;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- tx_packets = txq_stats->tx_packets;
- tx_bytes = txq_stats->tx_bytes;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
@@ -6975,10 +7055,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 rx_bytes;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- rx_packets = rxq_stats->rx_packets;
- rx_bytes = rxq_stats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
+ rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -7372,9 +7452,16 @@ int stmmac_dvr_probe(struct device *device,
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
- for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->xstats.txq_stats[i].syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
+ }
+
+ priv->xstats.pcpu_stats =
+ devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
+ if (!priv->xstats.pcpu_stats)
+ return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
@@ -7440,6 +7527,9 @@ int stmmac_dvr_probe(struct device *device,
dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
ERR_PTR(ret));
+ /* Wait a bit for the reset to take effect */
+ udelay(10);
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
@@ -7455,6 +7545,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
+ ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
@@ -7521,6 +7612,9 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ priv->hw->hw_vlan_en = true;
+
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1ffde555da47..70eadc83ca68 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -296,62 +296,80 @@ out:
}
/**
- * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
- * @plat: driver data platform structure
- * @np: device tree node
- * @dev: device pointer
- * Description:
- * The mdio bus will be allocated in case of a phy transceiver is on board;
- * it will be NULL if the fixed-link is configured.
- * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
- * in any case (for DSA, mdio must be registered even if fixed-link).
- * The table below sums the supported configurations:
- * -------------------------------
- * snps,phy-addr | Y
- * -------------------------------
- * phy-handle | Y
- * -------------------------------
- * fixed-link | N
- * -------------------------------
- * snps,dwmac-mdio |
- * even if | Y
- * fixed-link |
- * -------------------------------
+ * stmmac_of_get_mdio() - Gets the MDIO bus from the devicetree.
+ * @np: devicetree node
*
- * It returns 0 in case of success otherwise -ENODEV.
+ * The MDIO bus will be searched for in the following ways:
+ * 1. The compatible is "snps,dwc-qos-ethernet-4.10" && a "mdio" named
+ * child node exists
+ * 2. A child node with the "snps,dwmac-mdio" compatible is present
+ *
+ * Return: The MDIO node if present, otherwise NULL
*/
-static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
- struct device_node *np, struct device *dev)
+static struct device_node *stmmac_of_get_mdio(struct device_node *np)
{
- bool mdio = !of_phy_is_fixed_link(np);
static const struct of_device_id need_mdio_ids[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
{},
};
+ struct device_node *mdio_node = NULL;
if (of_match_node(need_mdio_ids, np)) {
- plat->mdio_node = of_get_child_by_name(np, "mdio");
+ mdio_node = of_get_child_by_name(np, "mdio");
} else {
/**
* If snps,dwmac-mdio is passed from DT, always register
* the MDIO
*/
- for_each_child_of_node(np, plat->mdio_node) {
- if (of_device_is_compatible(plat->mdio_node,
+ for_each_child_of_node(np, mdio_node) {
+ if (of_device_is_compatible(mdio_node,
"snps,dwmac-mdio"))
break;
}
}
- if (plat->mdio_node) {
+ return mdio_node;
+}
+
+/**
+ * stmmac_mdio_setup() - Populate platform related MDIO structures.
+ * @plat: driver data platform structure
+ * @np: devicetree node
+ * @dev: device pointer
+ *
+ * This searches for MDIO information from the devicetree.
+ * If an MDIO node is found, it's assigned to plat->mdio_node and
+ * plat->mdio_bus_data is allocated.
+ * If no connection can be determined, just plat->mdio_bus_data is allocated
+ * to indicate a bus should be created and scanned for a phy.
+ * If it's determined there's no MDIO bus needed, both are left NULL.
+ *
+ * This expects that plat->phy_node has already been searched for.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+static int stmmac_mdio_setup(struct plat_stmmacenet_data *plat,
+ struct device_node *np, struct device *dev)
+{
+ bool legacy_mdio;
+
+ plat->mdio_node = stmmac_of_get_mdio(np);
+ if (plat->mdio_node)
dev_dbg(dev, "Found MDIO subnode\n");
- mdio = true;
- }
- if (mdio) {
- plat->mdio_bus_data =
- devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
- GFP_KERNEL);
+ /* Legacy devicetrees could omit the MDIO bus description and expect
+ * the bus to be scanned for devices. If there's no phy or fixed-link
+ * described, assume this is the case since there must be something
+ * connected to the MAC.
+ */
+ legacy_mdio = !of_phy_is_fixed_link(np) && !plat->phy_node;
+ if (legacy_mdio)
+ dev_info(dev, "Deprecated MDIO bus assumption used\n");
+
+ if (plat->mdio_node || legacy_mdio) {
+ plat->mdio_bus_data = devm_kzalloc(dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
if (!plat->mdio_bus_data)
return -ENOMEM;
@@ -471,8 +489,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- /* To Configure PHY by using all device-tree supported properties */
- rc = stmmac_dt_phy(plat, np, &pdev->dev);
+ rc = stmmac_mdio_setup(plat, np, &pdev->dev);
if (rc)
return ERR_PTR(rc);
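
For reference, a minimal userspace sketch of the decision stmmac_mdio_setup() now makes; the booleans stand in for the devicetree lookups and the helper name is illustrative only, not part of the driver:

/* Sketch only: mdio_bus_data is allocated when an MDIO subnode exists
 * or when the legacy "no phy, no fixed-link" assumption applies.
 */
#include <stdbool.h>
#include <stdio.h>

static bool need_mdio_bus_data(bool has_mdio_node, bool has_fixed_link,
			       bool has_phy_node)
{
	bool legacy_mdio = !has_fixed_link && !has_phy_node;

	return has_mdio_node || legacy_mdio;
}

int main(void)
{
	printf("fixed-link, no mdio subnode: %d\n",
	       need_mdio_bus_data(false, true, false));
	printf("snps,dwmac-mdio with fixed-link: %d\n",
	       need_mdio_bus_data(true, true, false));
	printf("no phy, no fixed-link (legacy): %d\n",
	       need_mdio_bus_data(false, false, false));
	return 0;
}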
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index bffa5c017032..e04830a3a1fb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -72,7 +72,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
est_rst = true;
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
}
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
priv->plat->est->enable = true;
- ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 6ad3e0a11936..26fa33e5ec34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -975,6 +975,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
+ if (qopt->cycle_time_extension >= BIT(wid + 7))
+ return -ERANGE;
if (!plat->est) {
plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
@@ -1041,6 +1043,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
+ priv->plat->est->ter = qopt->cycle_time_extension;
+
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
@@ -1051,7 +1055,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
*/
priv->plat->fpe_cfg->enable = fpe;
- ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret) {
@@ -1072,7 +1076,7 @@ disable:
if (priv->plat->est) {
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
}
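
As a rough check of the new -ERANGE guard: assuming wid is the EST time-interval width reported by the DMA capabilities (an assumption for illustration), the largest accepted cycle_time_extension is BIT(wid + 7) - 1 nanoseconds. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int wid = 24;	/* hypothetical capability value */
	uint64_t limit = 1ULL << (wid + 7);

	/* Values at or above this limit are rejected with -ERANGE. */
	printf("cycle_time_extension must be < %llu ns\n",
	       (unsigned long long)limit);
	return 0;
}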
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index e60b557d59b9..1530d13984d4 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -134,14 +134,16 @@ config TI_K3_AM65_CPTS
protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
and PCIe Subsystem Precision Time Measurement (PTM).
-config TI_AM65_CPSW_TAS
- bool "Enable TAS offload in AM65 CPSW"
+config TI_AM65_CPSW_QOS
+ bool "Enable QoS offload features in AM65 CPSW"
depends on TI_K3_AM65_CPSW_NUSS && NET_SCH_TAPRIO && TI_K3_AM65_CPTS
help
- Say y here to support Time Aware Shaper(TAS) offload in AM65 CPSW.
- AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)
- defined in IEEE 802.1Q 2018. The EST scheduler runs on CPTS and the
- TAS/EST schedule is updated in the Fetch RAM memory of the CPSW.
+	  This option enables QoS offload features in AM65 CPSW such as
+	  Time Aware Shaper (TAS) / Enhanced Scheduled Traffic (EST),
+	  MQPRIO qdisc offload and Frame Preemption MAC Merge / Interspersing
+	  Express Traffic (IET).
+	  The EST scheduler runs on CPTS and the TAS/EST schedule is
+	  updated in the Fetch RAM of the CPSW.
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
@@ -187,6 +189,7 @@ config TI_ICSSG_PRUETH
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available starting with the AM65 platform.
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 27de1d697134..d8590304f3df 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -26,7 +26,8 @@ keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.
obj-$(CONFIG_TI_K3_CPPI_DESC_POOL) += k3-cppi-desc-pool.o
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o
+ti-am65-cpsw-nuss-$(CONFIG_TI_AM65_CPSW_QOS) += am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c51e2af91f69..35fceba01ea4 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -11,6 +11,7 @@
#include <linux/pm_runtime.h>
#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-qos.h"
#include "cpsw_ale.h"
#include "am65-cpts.h"
@@ -662,6 +663,34 @@ static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
hw_stats[i].offset);
}
+static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_stats_regs __iomem *stats;
+
+ stats = port->stat_base;
+
+ if (s->src != ETHTOOL_MAC_STATS_SRC_AGGREGATE)
+ return;
+
+ s->FramesTransmittedOK = readl_relaxed(&stats->tx_good_frames);
+ s->SingleCollisionFrames = readl_relaxed(&stats->tx_single_coll_frames);
+ s->MultipleCollisionFrames = readl_relaxed(&stats->tx_mult_coll_frames);
+ s->FramesReceivedOK = readl_relaxed(&stats->rx_good_frames);
+ s->FrameCheckSequenceErrors = readl_relaxed(&stats->rx_crc_errors);
+ s->AlignmentErrors = readl_relaxed(&stats->rx_align_code_errors);
+ s->OctetsTransmittedOK = readl_relaxed(&stats->tx_octets);
+ s->FramesWithDeferredXmissions = readl_relaxed(&stats->tx_deferred_frames);
+ s->LateCollisions = readl_relaxed(&stats->tx_late_collisions);
+ s->CarrierSenseErrors = readl_relaxed(&stats->tx_carrier_sense_errors);
+ s->OctetsReceivedOK = readl_relaxed(&stats->rx_octets);
+ s->MulticastFramesXmittedOK = readl_relaxed(&stats->tx_multicast_frames);
+ s->BroadcastFramesXmittedOK = readl_relaxed(&stats->tx_broadcast_frames);
+ s->MulticastFramesReceivedOK = readl_relaxed(&stats->rx_multicast_frames);
+ s->BroadcastFramesReceivedOK = readl_relaxed(&stats->rx_broadcast_frames);
+}
+
static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
@@ -715,6 +744,240 @@ static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
return 0;
}
+static void am65_cpsw_port_iet_rx_enable(struct am65_cpsw_port *port, bool enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (enable)
+ val |= AM65_CPSW_PN_CTL_IET_PORT_EN;
+ else
+ val &= ~AM65_CPSW_PN_CTL_IET_PORT_EN;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
+ am65_cpsw_iet_common_enable(port->common);
+}
+
+static void am65_cpsw_port_iet_tx_enable(struct am65_cpsw_port *port, bool enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ if (enable)
+ val |= AM65_CPSW_PN_IET_MAC_PENABLE;
+ else
+ val &= ~AM65_CPSW_PN_IET_MAC_PENABLE;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+}
+
+static int am65_cpsw_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_ndev_priv *priv = netdev_priv(ndev);
+ u32 port_ctrl, iet_ctrl, iet_status;
+ u32 add_frag_size;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_QOS))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mm_lock);
+
+ iet_ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ port_ctrl = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+
+ state->tx_enabled = !!(iet_ctrl & AM65_CPSW_PN_IET_MAC_PENABLE);
+ state->pmac_enabled = !!(port_ctrl & AM65_CPSW_PN_CTL_IET_PORT_EN);
+
+ iet_status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
+
+ if (iet_ctrl & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ else if (iet_status & AM65_CPSW_PN_MAC_VERIFIED)
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ else if (iet_status & AM65_CPSW_PN_MAC_VERIFY_FAIL)
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ else
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+
+ add_frag_size = AM65_CPSW_PN_IET_MAC_GET_ADDFRAGSIZE(iet_ctrl);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(add_frag_size);
+
+ /* Errata i2208: RX min fragment size cannot be less than 124 */
+ state->rx_min_frag_size = 124;
+
+	/* FPE is active if TX is enabled and verification either succeeded or is disabled (forced) */
+ state->tx_active = state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED);
+ state->verify_enabled = !(iet_ctrl & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY);
+
+ state->verify_time = port->qos.iet.verify_time_ms;
+
+	/* 802.3-2018 clause 30.14.1.6 says that the aMACMergeVerifyTime
+ * variable has a range between 1 and 128 ms inclusive. Limit to that.
+ */
+ state->max_verify_time = 128;
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static int am65_cpsw_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_ndev_priv *priv = netdev_priv(ndev);
+ struct am65_cpsw_iet *iet = &port->qos.iet;
+ u32 val, add_frag_size;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_QOS))
+ return -EOPNOTSUPP;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size, &add_frag_size, extack);
+ if (err)
+ return err;
+
+ mutex_lock(&priv->mm_lock);
+
+ if (cfg->pmac_enabled) {
+ /* change TX & RX FIFO MAX_BLKS as per TRM recommendation */
+ if (!iet->original_max_blks)
+ iet->original_max_blks = readl(port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
+
+ writel(AM65_CPSW_PN_TX_RX_MAX_BLKS_IET,
+ port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
+ } else if (iet->original_max_blks) {
+ /* restore RX & TX FIFO MAX_BLKS */
+ writel(iet->original_max_blks,
+ port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
+ }
+
+ am65_cpsw_port_iet_rx_enable(port, cfg->pmac_enabled);
+ am65_cpsw_port_iet_tx_enable(port, cfg->tx_enabled);
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ if (cfg->verify_enabled) {
+ val &= ~AM65_CPSW_PN_IET_MAC_DISABLEVERIFY;
+		/* Reset the Verify state machine. Verification won't start
+		 * here; it runs once the link comes up.
+		 */
+ val |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ } else {
+ val |= AM65_CPSW_PN_IET_MAC_DISABLEVERIFY;
+ /* Clear LINKFAIL to allow verify/response packets */
+ val &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ }
+
+ val &= ~AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK;
+ val |= AM65_CPSW_PN_IET_MAC_SET_ADDFRAGSIZE(add_frag_size);
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+	/* verify_timeout_count can only be set while the link is up */
+ port->qos.iet.verify_time_ms = cfg->verify_time;
+
+ /* enable/disable preemption based on link status */
+ am65_cpsw_iet_commit_preemptible_tcs(port);
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static void am65_cpsw_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ void __iomem *base = port->stat_base;
+
+ s->MACMergeFrameAssOkCount = readl(base + AM65_CPSW_STATN_IET_RX_ASSEMBLY_OK);
+ s->MACMergeFrameAssErrorCount = readl(base + AM65_CPSW_STATN_IET_RX_ASSEMBLY_ERROR);
+ s->MACMergeFrameSmdErrorCount = readl(base + AM65_CPSW_STATN_IET_RX_SMD_ERROR);
+ /* CPSW Functional Spec states:
+ * "The IET stat aMACMergeFragCountRx is derived by adding the
+ * Receive Assembly Error count to this value. i.e. AM65_CPSW_STATN_IET_RX_FRAG"
+ */
+ s->MACMergeFragCountRx = readl(base + AM65_CPSW_STATN_IET_RX_FRAG) + s->MACMergeFrameAssErrorCount;
+ s->MACMergeFragCountTx = readl(base + AM65_CPSW_STATN_IET_TX_FRAG);
+ s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
+}
+
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ tx_chn = &common->tx_chns[0];
+
+ coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &common->tx_chns[queue];
+
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ tx_chn = &common->tx_chns[0];
+
+ if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+ return -EINVAL;
+
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ return -EINVAL;
+
+ common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+ tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &common->tx_chns[queue];
+
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
+ dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
+ queue);
+ coal->tx_coalesce_usecs = 20;
+ }
+
+ tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.begin = am65_cpsw_ethtool_op_begin,
.complete = am65_cpsw_ethtool_op_complete,
@@ -729,9 +992,15 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_sset_count = am65_cpsw_get_sset_count,
.get_strings = am65_cpsw_get_strings,
.get_ethtool_stats = am65_cpsw_get_ethtool_stats,
+ .get_eth_mac_stats = am65_cpsw_get_eth_mac_stats,
.get_ts_info = am65_cpsw_get_ethtool_ts_info,
.get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
.set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_coalesce = am65_cpsw_get_coalesce,
+ .set_coalesce = am65_cpsw_set_coalesce,
+ .get_per_queue_coalesce = am65_cpsw_get_per_queue_coalesce,
+ .set_per_queue_coalesce = am65_cpsw_set_per_queue_coalesce,
.get_link = ethtool_op_get_link,
.get_link_ksettings = am65_cpsw_get_link_ksettings,
@@ -743,4 +1012,7 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_eee = am65_cpsw_get_eee,
.set_eee = am65_cpsw_set_eee,
.nway_reset = am65_cpsw_nway_reset,
+ .get_mm = am65_cpsw_get_mm,
+ .set_mm = am65_cpsw_set_mm,
+ .get_mm_stats = am65_cpsw_get_mm_stats,
};
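
A hedged sketch of the addFragSize encoding assumed by the ethtool_mm_frag_size_*() helpers used above: each increment of addFragSize raises the minimum non-final fragment by 64 octets (the exact formula lives in the ethtool headers), so the errata-mandated 124-byte RX minimum reported by the driver sits just below the addFragSize=1 step.

#include <stdio.h>

/* Assumed encoding: min fragment = 64 * (1 + addFragSize) octets. */
static unsigned int add_to_min(unsigned int add_frag_size)
{
	return 64 * (1 + add_frag_size);
}

int main(void)
{
	unsigned int add;

	for (add = 0; add < 4; add++)
		printf("addFragSize %u -> min fragment %u octets\n",
		       add, add_to_min(add));
	return 0;
}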
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index ece9f8df98ae..2939a21ca74f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -56,7 +56,7 @@
#define AM65_CPSW_MAX_PORTS 8
#define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
-#define AM65_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+#define AM65_CPSW_MAX_PACKET_SIZE 2024
#define AM65_CPSW_REG_CTL 0x004
#define AM65_CPSW_REG_STAT_PORT_EN 0x014
@@ -136,6 +136,8 @@
NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+#define AM65_CPSW_DEFAULT_TX_CHNS 8
+
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
const u8 *dev_addr)
{
@@ -292,7 +294,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
txqueue,
netif_tx_queue_stopped(netif_txq),
jiffies_to_msecs(jiffies - trans_start),
- dql_avail(&netif_txq->dql),
+ netdev_queue_dql_avail(netif_txq),
k3_cppi_desc_pool_avail(tx_chn->desc_pool));
if (netif_tx_queue_stopped(netif_txq)) {
@@ -367,10 +369,81 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
- int port_idx, i, ret;
+ int port_idx, i, ret, tx;
struct sk_buff *skb;
u32 val, port_mask;
@@ -437,8 +510,12 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
AM65_CPSW_MAX_PACKET_SIZE,
GFP_KERNEL);
if (!skb) {
+ ret = -ENOMEM;
dev_err(common->dev, "cannot allocate skb\n");
- return -ENOMEM;
+ if (i)
+ goto fail_rx;
+
+ return ret;
}
ret = am65_cpsw_nuss_rx_push(common, skb);
@@ -447,17 +524,28 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
"cannot submit skb to channel rx, error %d\n",
ret);
kfree_skb(skb);
+ if (i)
+ goto fail_rx;
+
return ret;
}
- kmemleak_not_leak(skb);
}
- k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
- for (i = 0; i < common->tx_ch_num; i++) {
- ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
- if (ret)
- return ret;
- napi_enable(&common->tx_chns[i].napi_tx);
+ ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+ goto fail_rx;
+ }
+
+ for (tx = 0; tx < common->tx_ch_num; tx++) {
+ ret = k3_udma_glue_enable_tx_chn(common->tx_chns[tx].tx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
+ tx, ret);
+ tx--;
+ goto fail_tx;
+ }
+ napi_enable(&common->tx_chns[tx].napi_tx);
}
napi_enable(&common->napi_rx);
@@ -468,10 +556,22 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
-}
-static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
-static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+fail_tx:
+ while (tx >= 0) {
+ napi_disable(&common->tx_chns[tx].napi_tx);
+ k3_udma_glue_disable_tx_chn(common->tx_chns[tx].tx_chn);
+ tx--;
+ }
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+
+fail_rx:
+ k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0,
+ &common->rx_chns,
+ am65_cpsw_nuss_rx_cleanup, 0);
+ return ret;
+}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
@@ -496,8 +596,10 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
msecs_to_jiffies(1000));
if (!i)
dev_err(common->dev, "tx timeout\n");
- for (i = 0; i < common->tx_ch_num; i++)
+ for (i = 0; i < common->tx_ch_num; i++) {
napi_disable(&common->tx_chns[i].napi_tx);
+ hrtimer_cancel(&common->tx_chns[i].tx_hrtimer);
+ }
for (i = 0; i < common->tx_ch_num; i++) {
k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
@@ -516,6 +618,7 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
}
napi_disable(&common->napi_rx);
+ hrtimer_cancel(&common->rx_hrtimer);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
@@ -646,27 +749,6 @@ runtime_put:
return ret;
}
-static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct am65_cpsw_rx_chn *rx_chn = data;
- struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
-
- desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
-}
-
static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
{
struct skb_shared_hwtstamps *ssh;
@@ -806,6 +888,15 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
return ret;
}
+static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
+{
+ struct am65_cpsw_common *common =
+ container_of(timer, struct am65_cpsw_common, rx_hrtimer);
+
+ enable_irq(common->rx_chns.irq);
+ return HRTIMER_NORESTART;
+}
+
static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
@@ -833,63 +924,19 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
if (common->rx_irq_disabled) {
common->rx_irq_disabled = false;
- enable_irq(common->rx_chns.irq);
+ if (unlikely(common->rx_pace_timeout)) {
+ hrtimer_start(&common->rx_hrtimer,
+ ns_to_ktime(common->rx_pace_timeout),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(common->rx_chns.irq);
+ }
}
}
return num_rx;
}
-static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
- struct cppi5_host_desc_t *desc)
-{
- struct cppi5_host_desc_t *first_desc, *next_desc;
- dma_addr_t buf_dma, next_desc_dma;
- u32 buf_dma_len;
-
- first_desc = desc;
- next_desc = first_desc;
-
- cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
- while (next_desc_dma) {
- next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- next_desc_dma);
- cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
- }
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
-}
-
-static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct am65_cpsw_tx_chn *tx_chn = data;
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- dev_kfree_skb_any(skb);
-}
-
static struct sk_buff *
am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
dma_addr_t desc_dma)
@@ -939,7 +986,7 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
}
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
- int chn, unsigned int budget)
+ int chn, unsigned int budget, bool *tdown)
{
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
@@ -962,6 +1009,7 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
if (cppi5_desc_is_tdcm(desc_dma)) {
if (atomic_dec_and_test(&common->tdown_cnt))
complete(&common->tdown_complete);
+ *tdown = true;
break;
}
@@ -984,7 +1032,7 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
}
static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
- int chn, unsigned int budget)
+ int chn, unsigned int budget, bool *tdown)
{
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
@@ -1005,6 +1053,7 @@ static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
if (cppi5_desc_is_tdcm(desc_dma)) {
if (atomic_dec_and_test(&common->tdown_cnt))
complete(&common->tdown_complete);
+ *tdown = true;
break;
}
@@ -1030,21 +1079,40 @@ static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
return num_tx;
}
+static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
+{
+ struct am65_cpsw_tx_chn *tx_chns =
+ container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);
+
+ enable_irq(tx_chns->irq);
+ return HRTIMER_NORESTART;
+}
+
static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
+ bool tdown = false;
int num_tx;
if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
- num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
+ num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
+ budget, &tdown);
else
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
if (num_tx >= budget)
return budget;
- if (napi_complete_done(napi_tx, num_tx))
- enable_irq(tx_chn->irq);
+ if (napi_complete_done(napi_tx, num_tx)) {
+ if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
+ hrtimer_start(&tx_chn->tx_hrtimer,
+ ns_to_ktime(tx_chn->tx_pace_timeout),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(tx_chn->irq);
+ }
+ }
return 0;
}
@@ -1676,6 +1744,8 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll);
+ hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -1901,6 +1971,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
netif_napi_add(common->dma_ndev, &common->napi_rx,
am65_cpsw_nuss_rx_poll);
+ hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
ret = devm_request_irq(dev, rx_chn->irq,
am65_cpsw_nuss_rx_irq,
@@ -2098,6 +2170,9 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
dev_err(dev, "Use random MAC address\n");
}
}
+
+ /* Reset all Queue priorities to 0 */
+ writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
}
of_node_put(node);
@@ -2162,12 +2237,15 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
ndev_priv = netdev_priv(port->ndev);
ndev_priv->port = port;
ndev_priv->msg_enable = AM65_CPSW_DEBUG;
+ mutex_init(&ndev_priv->mm_lock);
+ port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
- port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
+ port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
+ (VLAN_ETH_HLEN + ETH_FCS_LEN);
port->ndev->hw_features = NETIF_F_SG |
NETIF_F_RXCSUM |
NETIF_F_HW_CSUM |
@@ -2897,7 +2975,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
- common->tx_ch_num = 1;
+ common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
common->pf_p0_rx_ptype_rrobin = false;
common->default_vlan = 1;
@@ -2999,7 +3077,7 @@ err_pm_clear:
return ret;
}
-static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+static void am65_cpsw_nuss_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct am65_cpsw_common *common;
@@ -3008,8 +3086,14 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
common = dev_get_drvdata(dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ /* Note, if this error path is taken, we're leaking some
+ * resources.
+ */
+ dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
+ ERR_PTR(ret));
+ return;
+ }
am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
@@ -3027,7 +3111,6 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int am65_cpsw_nuss_suspend(struct device *dev)
@@ -3127,7 +3210,7 @@ static struct platform_driver am65_cpsw_nuss_driver = {
.pm = &am65_cpsw_nuss_dev_pm_ops,
},
.probe = am65_cpsw_nuss_probe,
- .remove = am65_cpsw_nuss_remove,
+ .remove_new = am65_cpsw_nuss_remove,
};
module_platform_driver(am65_cpsw_nuss_driver);
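
A quick arithmetic check of the new max_mtu derivation: with the packet-size cap raised to 2024 bytes, and VLAN_ETH_HLEN being 18 and ETH_FCS_LEN being 4, the advertised max_mtu becomes 2002.

#include <stdio.h>

#define AM65_CPSW_MAX_PACKET_SIZE	2024
#define VLAN_ETH_HLEN			18
#define ETH_FCS_LEN			4

int main(void)
{
	/* 2024 - (18 + 4) = 2002 */
	printf("max_mtu = %d\n",
	       AM65_CPSW_MAX_PACKET_SIZE - (VLAN_ETH_HLEN + ETH_FCS_LEN));
	return 0;
}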
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index f3dad2ab9828..7da0492dc091 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -75,6 +75,8 @@ struct am65_cpsw_tx_chn {
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_tx_channel *tx_chn;
spinlock_t lock; /* protect TX rings in multi-port mode */
+ struct hrtimer tx_hrtimer;
+ unsigned long tx_pace_timeout;
int irq;
u32 id;
u32 descs_num;
@@ -138,6 +140,8 @@ struct am65_cpsw_common {
struct napi_struct napi_rx;
bool rx_irq_disabled;
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout;
u32 nuss_ver;
u32 cpsw_ver;
@@ -145,6 +149,7 @@ struct am65_cpsw_common {
bool pf_p0_rx_ptype_rrobin;
struct am65_cpts *cpts;
int est_enabled;
+ bool iet_enabled;
bool is_emac_mode;
u16 br_members;
@@ -170,6 +175,10 @@ struct am65_cpsw_ndev_priv {
struct am65_cpsw_port *port;
struct am65_cpsw_ndev_stats __percpu *stats;
bool offload_fwd_mark;
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates
+ */
+ struct mutex mm_lock;
};
#define am65_ndev_to_priv(ndev) \
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 9ac2ff05d501..816e73a3d6e4 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -4,10 +4,13 @@
*
* quality of service module includes:
* Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
+ * Interspersing Express Traffic (IET - P802.3br/D2.0)
*/
#include <linux/pm_runtime.h>
+#include <linux/math.h>
#include <linux/time.h>
+#include <linux/units.h>
#include <net/pkt_cls.h>
#include "am65-cpsw-nuss.h"
@@ -15,40 +18,7 @@
#include "am65-cpts.h"
#include "cpsw_ale.h"
-#define AM65_CPSW_REG_CTL 0x004
-#define AM65_CPSW_PN_REG_CTL 0x004
-#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
-#define AM65_CPSW_PN_REG_EST_CTL 0x060
-#define AM65_CPSW_PN_REG_PRI_CIR(pri) (0x140 + 4 * (pri))
-
-/* AM65_CPSW_REG_CTL register fields */
-#define AM65_CPSW_CTL_EST_EN BIT(18)
-
-/* AM65_CPSW_PN_REG_CTL register fields */
-#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
-
-/* AM65_CPSW_PN_REG_EST_CTL register fields */
-#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
-#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
-#define AM65_CPSW_PN_EST_TS_EN BIT(2)
-#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
-#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
-#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
-
-/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
-#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
-#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
-#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
-#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
-#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
-
-/* EST FETCH COMMAND RAM */
-#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
-#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
-#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
-#define AM65_CPSW_FETCH_CNT_OFFSET 8
-#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
-#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+#define TO_MBPS(x) DIV_ROUND_UP((x), BYTES_PER_MBIT)
enum timer_act {
TACT_PROG, /* need program timer */
@@ -56,6 +26,412 @@ enum timer_act {
TACT_SKIP_PROG, /* just buffer can be updated */
};
+static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs);
+
+static u32
+am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
+{
+ u32 ir;
+
+ bus_freq /= 1000000;
+ ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
+ return ir;
+}
+
+static void am65_cpsw_tx_pn_shaper_reset(struct am65_cpsw_port *port)
+{
+ int prio;
+
+ for (prio = 0; prio < AM65_CPSW_PN_FIFO_PRIO_NUM; prio++) {
+ writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
+ writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
+ }
+}
+
+static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct am65_cpsw_common *common = port->common;
+ struct tc_mqprio_qopt_offload *mqprio;
+ bool enable, shaper_susp = false;
+ u32 rate_mbps;
+ int tc, prio;
+
+ mqprio = &p_mqprio->mqprio_hw;
+ /* takes care of no link case as well */
+ if (p_mqprio->max_rate_total > port->qos.link_speed)
+ shaper_susp = true;
+
+ am65_cpsw_tx_pn_shaper_reset(port);
+
+ enable = p_mqprio->shaper_en && !shaper_susp;
+ if (!enable)
+ return;
+
+ /* Rate limit is specified per Traffic Class but
+ * for CPSW, rate limit can be applied per priority
+ * at port FIFO.
+ *
+ * We have assigned the same priority (TCn) to all queues
+ * of a Traffic Class so they share the same shaper
+ * bandwidth.
+ */
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ prio = tc;
+
+ rate_mbps = TO_MBPS(mqprio->min_rate[tc]);
+ rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
+ common->bus_freq);
+ writel(rate_mbps,
+ port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
+
+ rate_mbps = 0;
+
+ if (mqprio->max_rate[tc]) {
+ rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc];
+ rate_mbps = TO_MBPS(rate_mbps);
+ rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
+ common->bus_freq);
+ }
+
+ writel(rate_mbps,
+ port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
+ }
+}
+
+static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct netlink_ext_ack *extack = mqprio->extack;
+ u64 min_rate_total = 0, max_rate_total = 0;
+ u32 min_rate_msk = 0, max_rate_msk = 0;
+ bool has_min_rate, has_max_rate;
+ int num_tc, i;
+
+ if (!(mqprio->flags & TC_MQPRIO_F_SHAPER))
+ return 0;
+
+ if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+ return 0;
+
+ has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
+
+ if (!has_min_rate && has_max_rate) {
+ NL_SET_ERR_MSG_MOD(extack, "min_rate is required with max_rate");
+ return -EOPNOTSUPP;
+ }
+
+ if (!has_min_rate)
+ return 0;
+
+ num_tc = mqprio->qopt.num_tc;
+
+ for (i = num_tc - 1; i >= 0; i--) {
+ u32 ch_msk;
+
+ if (mqprio->min_rate[i])
+ min_rate_msk |= BIT(i);
+ min_rate_total += mqprio->min_rate[i];
+
+ if (has_max_rate) {
+ if (mqprio->max_rate[i])
+ max_rate_msk |= BIT(i);
+ max_rate_total += mqprio->max_rate[i];
+
+ if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TX tc%d rate max>0 but min=0",
+ i);
+ return -EINVAL;
+ }
+
+ if (mqprio->max_rate[i] &&
+ mqprio->max_rate[i] < mqprio->min_rate[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TX tc%d rate min(%llu)>max(%llu)",
+ i, mqprio->min_rate[i],
+ mqprio->max_rate[i]);
+ return -EINVAL;
+ }
+ }
+
+ ch_msk = GENMASK(num_tc - 1, i);
+ if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Min rate must be set sequentially hi->lo tx_rate_msk%x",
+ min_rate_msk);
+ return -EINVAL;
+ }
+
+ if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Max rate must be set sequentially hi->lo tx_rate_msk%x",
+ max_rate_msk);
+ return -EINVAL;
+ }
+ }
+
+ min_rate_total = TO_MBPS(min_rate_total);
+ max_rate_total = TO_MBPS(max_rate_total);
+
+ p_mqprio->shaper_en = true;
+ p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total);
+
+ return 0;
+}
+
+static void am65_cpsw_reset_tc_mqprio(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+
+ p_mqprio->shaper_en = false;
+ p_mqprio->max_rate_total = 0;
+
+ am65_cpsw_tx_pn_shaper_reset(port);
+ netdev_reset_tc(ndev);
+
+ /* Reset all Queue priorities to 0 */
+ writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
+
+ am65_cpsw_iet_change_preemptible_tcs(port, 0);
+}
+
+static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct am65_cpsw_common *common = port->common;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ int i, tc, offset, count, prio, ret;
+ u8 num_tc = qopt->num_tc;
+ u32 tx_prio_map = 0;
+
+ memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ if (!num_tc) {
+ am65_cpsw_reset_tc_mqprio(ndev);
+ ret = 0;
+ goto exit_put;
+ }
+
+ ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
+ if (ret)
+ goto exit_put;
+
+ netdev_set_num_tc(ndev, num_tc);
+
+	/* Multiple Linux priorities can map to a Traffic Class.
+	 * A Traffic Class can have multiple contiguous Queues, and
+	 * Queues get mapped to Channels (thread_id).
+	 * If not VLAN tagged, thread_id is used as packet_priority;
+	 * if VLAN tagged, the VLAN priority is used as packet_priority.
+	 * packet_priority gets mapped to header_priority in p0_rx_pri_map,
+	 * and header_priority gets mapped to switch_priority in pn_tx_pri_map.
+	 * As p0_rx_pri_map is left at its default (0x76543210), we can
+	 * assume that Queue_n gets mapped to header_priority_n. We can then
+	 * set the switch priority in pn_tx_pri_map.
+	 */
+
+ for (tc = 0; tc < num_tc; tc++) {
+ prio = tc;
+
+ /* For simplicity we assign the same priority (TCn) to
+ * all queues of a Traffic Class.
+ */
+ for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
+ tx_prio_map |= prio << (4 * i);
+
+ count = qopt->count[tc];
+ offset = qopt->offset[tc];
+ netdev_set_tc_queue(ndev, tc, count, offset);
+ }
+
+ writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
+
+ am65_cpsw_tx_pn_shaper_apply(port);
+ am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs);
+
+exit_put:
+ pm_runtime_put(common->dev);
+
+ return ret;
+}
+
+static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
+{
+ int verify_time_ms = port->qos.iet.verify_time_ms;
+ u32 val;
+
+ /* The number of wireside clocks contained in the verify
+ * timeout counter. The default is 0x1312d0
+	 * (10 ms at 125 MHz in 1G mode).
+	 */
+	val = 125 * HZ_PER_MHZ;	/* assuming 125 MHz wireside clock */
+
+ val /= MILLIHZ_PER_HZ; /* count per ms timeout */
+ val *= verify_time_ms; /* count for timeout ms */
+
+ if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
+ return -EINVAL;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);
+
+ return 0;
+}
+
+static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
+{
+ u32 ctrl, status;
+ int try;
+
+ try = 20;
+ do {
+ /* Reset the verify state machine by writing 1
+ * to LINKFAIL
+ */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ /* Clear MAC_LINKFAIL bit to start Verify. */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ msleep(port->qos.iet.verify_time_ms);
+
+ status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
+ if (status & AM65_CPSW_PN_MAC_VERIFIED)
+ return 0;
+
+ if (status & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
+ netdev_dbg(port->ndev,
+ "MAC Merge verify failed, trying again\n");
+ continue;
+ }
+
+ if (status & AM65_CPSW_PN_MAC_RESPOND_ERR) {
+ netdev_dbg(port->ndev, "MAC Merge respond error\n");
+ return -ENODEV;
+ }
+
+ if (status & AM65_CPSW_PN_MAC_VERIFY_ERR) {
+ netdev_dbg(port->ndev, "MAC Merge verify error\n");
+ return -ENODEV;
+ }
+ } while (try-- > 0);
+
+ netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
+ return -ETIMEDOUT;
+}
+
+static void am65_cpsw_iet_set_preempt_mask(struct am65_cpsw_port *port, u8 preemptible_tcs)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
+ val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+}
+
+/* Enable the common IET_EN bit only if at least one port has RX IET
+ * enabled. The UAPI doesn't allow TX enable without RX enable.
+ */
+void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ bool rx_enable = false;
+ u32 val;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
+ if (rx_enable)
+ break;
+ }
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+
+ if (rx_enable)
+ val |= AM65_CPSW_CTL_IET_EN;
+ else
+ val &= ~AM65_CPSW_CTL_IET_EN;
+
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->iet_enabled = rx_enable;
+}
+
+/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
+ * (active/inactive), but the preemptible traffic classes should only be
+ * committed to hardware once TX is active. Resort to polling.
+ */
+void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
+{
+ u8 preemptible_tcs;
+ int err;
+ u32 val;
+
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN))
+ return;
+
+ /* update common IET enable */
+ am65_cpsw_iet_common_enable(port->common);
+
+ /* update verify count */
+ err = am65_cpsw_iet_set_verify_timeout_count(port);
+ if (err) {
+ netdev_err(port->ndev, "couldn't set verify count: %d\n", err);
+ return;
+ }
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
+ err = am65_cpsw_iet_verify_wait(port);
+ if (err)
+ return;
+ }
+
+ preemptible_tcs = port->qos.iet.preemptible_tcs;
+ am65_cpsw_iet_set_preempt_mask(port, preemptible_tcs);
+}
+
+static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+
+ port->qos.iet.preemptible_tcs = preemptible_tcs;
+ mutex_lock(&priv->mm_lock);
+ am65_cpsw_iet_commit_preemptible_tcs(port);
+ mutex_unlock(&priv->mm_lock);
+}
+
+static void am65_cpsw_iet_link_state_update(struct net_device *ndev)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ mutex_lock(&priv->mm_lock);
+ am65_cpsw_iet_commit_preemptible_tcs(port);
+ mutex_unlock(&priv->mm_lock);
+}
+
static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
return port->qos.est_oper || port->qos.est_admin;
@@ -428,7 +804,7 @@ static void am65_cpsw_stop_est(struct net_device *ndev)
am65_cpsw_timer_stop(ndev);
}
-static void am65_cpsw_purge_est(struct net_device *ndev)
+static void am65_cpsw_taprio_destroy(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
@@ -439,31 +815,74 @@ static void am65_cpsw_purge_est(struct net_device *ndev)
port->qos.est_oper = NULL;
port->qos.est_admin = NULL;
+
+ am65_cpsw_reset_tc_mqprio(ndev);
}
-static int am65_cpsw_configure_taprio(struct net_device *ndev,
- struct am65_cpsw_est *est_new)
+static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
+ struct tc_taprio_qopt_offload *to)
+{
+ int i;
+
+ *to = *from;
+ for (i = 0; i < from->num_entries; i++)
+ to->entries[i] = from->entries[i];
+}
+
+static int am65_cpsw_taprio_replace(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct netlink_ext_ack *extack = taprio->mqprio.extack;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpts *cpts = common->cpts;
- int ret = 0, tact = TACT_PROG;
+ struct am65_cpsw_est *est_new;
+ int ret, tact;
- am65_cpsw_est_update_state(ndev);
+ if (!netif_running(ndev)) {
+ NL_SET_ERR_MSG_MOD(extack, "interface is down, link speed unknown");
+ return -ENETDOWN;
+ }
- if (est_new->taprio.cmd == TAPRIO_CMD_DESTROY) {
- am65_cpsw_stop_est(ndev);
- return ret;
+ if (common->pf_p0_rx_ptype_rrobin) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "p0-rx-ptype-rrobin flag conflicts with taprio qdisc");
+ return -EINVAL;
}
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return -ENOLINK;
+
+ if (taprio->cycle_time_extension) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "cycle time extension not supported");
+ return -EOPNOTSUPP;
+ }
+
+ est_new = devm_kzalloc(&ndev->dev,
+ struct_size(est_new, taprio.entries, taprio->num_entries),
+ GFP_KERNEL);
+ if (!est_new)
+ return -ENOMEM;
+
+ ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio);
+ if (ret)
+ return ret;
+
+ am65_cpsw_cp_taprio(taprio, &est_new->taprio);
+
+ am65_cpsw_est_update_state(ndev);
+
ret = am65_cpsw_est_check_scheds(ndev, est_new);
if (ret < 0)
- return ret;
+ goto fail;
tact = am65_cpsw_timer_act(ndev, est_new);
if (tact == TACT_NEED_STOP) {
- dev_err(&ndev->dev,
- "Can't toggle estf timer, stop taprio first");
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't toggle estf timer, stop taprio first");
+ ret = -EINVAL;
+ goto fail;
}
if (tact == TACT_PROG)
@@ -476,62 +895,26 @@ static int am65_cpsw_configure_taprio(struct net_device *ndev,
am65_cpsw_est_set_sched_list(ndev, est_new);
am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
- am65_cpsw_est_set(ndev, est_new->taprio.cmd == TAPRIO_CMD_REPLACE);
+ am65_cpsw_est_set(ndev, 1);
if (tact == TACT_PROG) {
ret = am65_cpsw_timer_set(ndev, est_new);
if (ret) {
- dev_err(&ndev->dev, "Failed to set cycle time");
- return ret;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set cycle time");
+ goto fail;
}
}
- return ret;
-}
-
-static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
- struct tc_taprio_qopt_offload *to)
-{
- int i;
-
- *to = *from;
- for (i = 0; i < from->num_entries; i++)
- to->entries[i] = from->entries[i];
-}
-
-static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
-{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- struct tc_taprio_qopt_offload *taprio = type_data;
- struct am65_cpsw_est *est_new;
- int ret = 0;
-
- if (taprio->cycle_time_extension) {
- dev_err(&ndev->dev, "Failed to set cycle time extension");
- return -EOPNOTSUPP;
- }
-
- est_new = devm_kzalloc(&ndev->dev,
- struct_size(est_new, taprio.entries, taprio->num_entries),
- GFP_KERNEL);
- if (!est_new)
- return -ENOMEM;
-
- am65_cpsw_cp_taprio(taprio, &est_new->taprio);
- ret = am65_cpsw_configure_taprio(ndev, est_new);
- if (!ret) {
- if (taprio->cmd == TAPRIO_CMD_REPLACE) {
- devm_kfree(&ndev->dev, port->qos.est_admin);
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+ port->qos.est_admin = est_new;
+ am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs);
- port->qos.est_admin = est_new;
- } else {
- devm_kfree(&ndev->dev, est_new);
- am65_cpsw_purge_est(ndev);
- }
- } else {
- devm_kfree(&ndev->dev, est_new);
- }
+ return 0;
+fail:
+ am65_cpsw_reset_tc_mqprio(ndev);
+ devm_kfree(&ndev->dev, est_new);
return ret;
}
@@ -541,7 +924,6 @@ static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
ktime_t cur_time;
s64 delta;
- port->qos.link_speed = link_speed;
if (!am65_cpsw_port_est_enabled(port))
return;
@@ -558,37 +940,26 @@ static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
return;
purge_est:
- am65_cpsw_purge_est(ndev);
+ am65_cpsw_taprio_destroy(ndev);
}
static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct tc_taprio_qopt_offload *taprio = type_data;
- struct am65_cpsw_common *common = port->common;
-
- if (taprio->cmd != TAPRIO_CMD_REPLACE &&
- taprio->cmd != TAPRIO_CMD_DESTROY)
- return -EOPNOTSUPP;
-
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
- return -ENODEV;
-
- if (!netif_running(ndev)) {
- dev_err(&ndev->dev, "interface is down, link speed unknown\n");
- return -ENETDOWN;
- }
-
- if (common->pf_p0_rx_ptype_rrobin) {
- dev_err(&ndev->dev,
- "p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
- return -EINVAL;
+ int err = 0;
+
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = am65_cpsw_taprio_replace(ndev, taprio);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ am65_cpsw_taprio_destroy(ndev);
+ break;
+ default:
+ err = -EOPNOTSUPP;
}
- if (port->qos.link_speed == SPEED_UNKNOWN)
- return -ENOLINK;
-
- return am65_cpsw_set_taprio(ndev, type_data);
+ return err;
}
static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
@@ -596,12 +967,17 @@ static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
struct tc_query_caps_base *base = type_data;
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
+
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
- return -EOPNOTSUPP;
-
caps->gate_mask_per_txq = true;
return 0;
@@ -787,55 +1163,6 @@ static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_blo
port, port, true);
}
-int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_QUERY_CAPS:
- return am65_cpsw_tc_query_caps(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return am65_cpsw_setup_taprio(ndev, type_data);
- case TC_SETUP_BLOCK:
- return am65_cpsw_qos_setup_tc_block(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
-{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
-
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
- return;
-
- am65_cpsw_est_link_up(ndev, link_speed);
- port->qos.link_down_time = 0;
-}
-
-void am65_cpsw_qos_link_down(struct net_device *ndev)
-{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
-
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
- return;
-
- if (!port->qos.link_down_time)
- port->qos.link_down_time = ktime_get();
-
- port->qos.link_speed = SPEED_UNKNOWN;
-}
-
-static u32
-am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
-{
- u32 ir;
-
- bus_freq /= 1000000;
- ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
- return ir;
-}
-
static void
am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
int tx_ch, u32 rate_mbps)
@@ -937,3 +1264,44 @@ void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
}
}
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return am65_cpsw_tc_query_caps(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return am65_cpsw_setup_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return am65_cpsw_setup_mqprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return am65_cpsw_qos_setup_tc_block(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ port->qos.link_speed = link_speed;
+ am65_cpsw_tx_pn_shaper_apply(port);
+ am65_cpsw_iet_link_state_update(ndev);
+
+ am65_cpsw_est_link_up(ndev, link_speed);
+ port->qos.link_down_time = 0;
+}
+
+void am65_cpsw_qos_link_down(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ port->qos.link_speed = SPEED_UNKNOWN;
+ am65_cpsw_tx_pn_shaper_apply(port);
+ am65_cpsw_iet_link_state_update(ndev);
+
+ if (!port->qos.link_down_time)
+ port->qos.link_down_time = ktime_get();
+}
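
A worked check of the verify-timeout arithmetic in am65_cpsw_iet_set_verify_timeout_count() above: at the assumed 125 MHz wireside clock, a 10 ms verify time corresponds to 1,250,000 clocks, i.e. 0x1312d0, which matches the register default quoted in the comment.

#include <stdio.h>

int main(void)
{
	unsigned int clk_hz = 125000000;	/* assumed 1G wireside clock */
	unsigned int verify_time_ms = 10;
	unsigned int count = clk_hz / 1000 * verify_time_ms;

	printf("verify timeout count = %u (0x%x)\n", count, count);
	return 0;
}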
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
index 0cc2a3b3d7f9..b328e56c5b2b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -9,6 +9,7 @@
#include <net/pkt_sched.h>
struct am65_cpsw_common;
+struct am65_cpsw_port;
struct am65_cpsw_est {
int buf;
@@ -16,6 +17,18 @@ struct am65_cpsw_est {
struct tc_taprio_qopt_offload taprio;
};
+struct am65_cpsw_mqprio {
+ struct tc_mqprio_qopt_offload mqprio_hw;
+ u64 max_rate_total;
+ bool shaper_en;
+};
+
+struct am65_cpsw_iet {
+ u8 preemptible_tcs;
+ u32 original_max_blks;
+ int verify_time_ms;
+};
+
struct am65_cpsw_ale_ratelimit {
unsigned long cookie;
u64 rate_packet_ps;
@@ -26,16 +39,189 @@ struct am65_cpsw_qos {
struct am65_cpsw_est *est_oper;
ktime_t link_down_time;
int link_speed;
+ struct am65_cpsw_mqprio mqprio;
+ struct am65_cpsw_iet iet;
struct am65_cpsw_ale_ratelimit ale_bc_ratelimit;
struct am65_cpsw_ale_ratelimit ale_mc_ratelimit;
};
+
+/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_EST_EN BIT(18)
+
+/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
+
+/* AM65_CPSW_PN_REG_EST_CTL register fields */
+#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
+#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
+#define AM65_CPSW_PN_EST_TS_EN BIT(2)
+#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
+#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
+#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+
+/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
+#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
+#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
+#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
+#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
+#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
+
+/* EST FETCH COMMAND RAM */
+#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
+#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
+#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
+#define AM65_CPSW_FETCH_CNT_OFFSET 8
+#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
+#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+
+/* number of priority queues per port FIFO */
+#define AM65_CPSW_PN_FIFO_PRIO_NUM 8
+
+#if IS_ENABLED(CONFIG_TI_AM65_CPSW_QOS)
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed);
void am65_cpsw_qos_link_down(struct net_device *ndev);
int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev, int queue, u32 rate_mbps);
void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common);
+void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port);
+void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common);
+#else
+static inline int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void am65_cpsw_qos_link_up(struct net_device *ndev,
+ int link_speed)
+{ }
+
+static inline void am65_cpsw_qos_link_down(struct net_device *ndev)
+{ }
+
+static inline int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
+ int queue,
+ u32 rate_mbps)
+{
+ return 0;
+}
+
+static inline void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
+{ }
+static inline void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
+{ }
+static inline void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
+{ }
+#endif
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_MAX_BLKS 0x008
+#define AM65_CPSW_PN_REG_TX_PRI_MAP 0x018
+#define AM65_CPSW_PN_REG_RX_PRI_MAP 0x020
+#define AM65_CPSW_PN_REG_IET_CTRL 0x040
+#define AM65_CPSW_PN_REG_IET_STATUS 0x044
+#define AM65_CPSW_PN_REG_IET_VERIFY 0x048
+#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
+#define AM65_CPSW_PN_REG_EST_CTL 0x060
+#define AM65_CPSW_PN_REG_PRI_CIR(pri) (0x140 + 4 * (pri))
+#define AM65_CPSW_PN_REG_PRI_EIR(pri) (0x160 + 4 * (pri))
+
+/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_IET_EN BIT(17)
+#define AM65_CPSW_CTL_EST_EN BIT(18)
+
+/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_IET_PORT_EN BIT(16)
+#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
+
+/* AM65_CPSW_PN_REG_EST_CTL register fields */
+#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
+#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
+#define AM65_CPSW_PN_EST_TS_EN BIT(2)
+#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
+#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
+#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+
+/* AM65_CPSW_PN_REG_IET_CTRL register fields */
+#define AM65_CPSW_PN_IET_MAC_PENABLE BIT(0)
+#define AM65_CPSW_PN_IET_MAC_DISABLEVERIFY BIT(2)
+#define AM65_CPSW_PN_IET_MAC_LINKFAIL BIT(3)
+#define AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK GENMASK(10, 8)
+#define AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_OFFSET 8
+#define AM65_CPSW_PN_IET_MAC_PREMPT_MASK GENMASK(23, 16)
+#define AM65_CPSW_PN_IET_MAC_PREMPT_OFFSET 16
+
+#define AM65_CPSW_PN_IET_MAC_SET_ADDFRAGSIZE(n) (((n) << AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_OFFSET) & \
+ AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK)
+#define AM65_CPSW_PN_IET_MAC_GET_ADDFRAGSIZE(n) (((n) & AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK) >> \
+ AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_OFFSET)
+#define AM65_CPSW_PN_IET_MAC_SET_PREEMPT(n) (((n) << AM65_CPSW_PN_IET_MAC_PREMPT_OFFSET) & \
+ AM65_CPSW_PN_IET_MAC_PREMPT_MASK)
+#define AM65_CPSW_PN_IET_MAC_GET_PREEMPT(n) (((n) & AM65_CPSW_PN_IET_MAC_PREMPT_MASK) >> \
+ AM65_CPSW_PN_IET_MAC_PREMPT_OFFSET)
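+/* e.g. AM65_CPSW_PN_IET_MAC_SET_PREEMPT(0x0f) evaluates to 0x000f0000 */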
+
+/* AM65_CPSW_PN_REG_IET_STATUS register fields */
+#define AM65_CPSW_PN_MAC_STATUS GENMASK(3, 0)
+#define AM65_CPSW_PN_MAC_VERIFIED BIT(0)
+#define AM65_CPSW_PN_MAC_VERIFY_FAIL BIT(1)
+#define AM65_CPSW_PN_MAC_RESPOND_ERR BIT(2)
+#define AM65_CPSW_PN_MAC_VERIFY_ERR BIT(3)
+
+/* AM65_CPSW_PN_REG_IET_VERIFY register fields */
+#define AM65_CPSW_PN_MAC_VERIFY_CNT_MASK GENMASK(23, 0)
+#define AM65_CPSW_PN_MAC_GET_VERIFY_CNT(n) ((n) & AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
+/* 10 msec converted to NSEC */
+#define AM65_CPSW_IET_VERIFY_CNT_MS (10)
+#define AM65_CPSW_IET_VERIFY_CNT_NS (AM65_CPSW_IET_VERIFY_CNT_MS * \
+ NSEC_PER_MSEC)
+
+/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
+#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
+#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
+#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
+#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
+#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
+
+/* EST FETCH COMMAND RAM */
+#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
+#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
+#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
+#define AM65_CPSW_FETCH_CNT_OFFSET 8
+#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
+#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+
+/* AM65_CPSW_PN_REG_MAX_BLKS fields for IET and No IET cases */
+/* 7 blocks for pn_rx_max_blks, 13 for pn_tx_max_blks */
+#define AM65_CPSW_PN_TX_RX_MAX_BLKS_IET 0xD07
+
+/* Slave IET Stats. register offsets */
+#define AM65_CPSW_STATN_IET_RX_ASSEMBLY_ERROR 0x140
+#define AM65_CPSW_STATN_IET_RX_ASSEMBLY_OK 0x144
+#define AM65_CPSW_STATN_IET_RX_SMD_ERROR 0x148
+#define AM65_CPSW_STATN_IET_RX_FRAG 0x14c
+#define AM65_CPSW_STATN_IET_TX_HOLD 0x150
+#define AM65_CPSW_STATN_IET_TX_FRAG 0x154
+
+/* number of priority queues per port FIFO */
+#define AM65_CPSW_PN_FIFO_PRIO_NUM 8
#endif /* AM65_CPSW_QOS_H_ */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ca4d4548f85e..c0a5abd8d9a8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
}
+ phy->mac_managed_pm = true;
+
slave->phy = phy;
phy_attached_info(slave->phy);
@@ -1722,14 +1724,20 @@ clean_runtime_disable_ret:
return ret;
}
-static int cpsw_remove(struct platform_device *pdev)
+static void cpsw_remove(struct platform_device *pdev)
{
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int i, ret;
ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ /* Note, if this error path is taken, we're leaking some
+ * resources.
+ */
+ dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
+ ERR_PTR(ret));
+ return;
+ }
for (i = 0; i < cpsw->data.slaves; i++)
if (cpsw->slaves[i].ndev)
@@ -1740,7 +1748,6 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1795,7 +1802,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
- .remove = cpsw_remove,
+ .remove_new = cpsw_remove,
};
module_platform_driver(cpsw_driver);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 0e4f526b1753..087dcb67505a 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->slave_num);
return;
}
+
+ phy->mac_managed_pm = true;
+
slave->phy = phy;
phy_attached_info(slave->phy);
@@ -2037,14 +2040,20 @@ clean_dt_ret:
return ret;
}
-static int cpsw_remove(struct platform_device *pdev)
+static void cpsw_remove(struct platform_device *pdev)
{
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int ret;
ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ /* Note, if this error path is taken, we're leaking some
+ * resources.
+ */
+ dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
+ ERR_PTR(ret));
+ return;
+ }
cpsw_unregister_notifiers(cpsw);
cpsw_unregister_devlink(cpsw);
@@ -2055,7 +2064,6 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_remove_dt(cpsw);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused cpsw_suspend(struct device *dev)
@@ -2116,7 +2124,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
- .remove = cpsw_remove,
+ .remove_new = cpsw_remove,
};
module_platform_driver(cpsw_driver);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index bcccf43d368b..dbbea9146040 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -638,6 +638,16 @@ static void cpts_calc_mult_shift(struct cpts *cpts)
freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}
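+/* Wrappers with the void * signature expected by devm_add_action_or_reset(),
+ * used below in place of casting the typed clk/of helpers.
+ */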
+static void cpts_clk_unregister(void *clk)
+{
+ clk_hw_unregister_mux(clk);
+}
+
+static void cpts_clk_del_provider(void *np)
+{
+ of_clk_del_provider(np);
+}
+
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
struct device_node *refclk_np;
@@ -687,9 +697,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
goto mux_fail;
}
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))clk_hw_unregister_mux,
- clk_hw);
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw);
if (ret) {
dev_err(cpts->dev, "add clkmux unreg action %d", ret);
goto mux_fail;
@@ -699,8 +707,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
if (ret)
goto mux_fail;
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))of_clk_del_provider,
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider,
refclk_np);
if (ret) {
dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 628c87dc1d28..8e07d4a1b6ba 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -511,16 +511,12 @@ static const struct k3_mdio_soc_data am65_mdio_soc_data = {
};
static const struct soc_device_attribute k3_mdio_socinfo[] = {
- { .family = "AM62X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "AM64X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "AM64X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
- { .family = "AM65X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "AM65X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
- { .family = "J7200", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "J7200", .revision = "SR2.0", .data = &am65_mdio_soc_data },
- { .family = "J721E", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "J721E", .revision = "SR2.0", .data = &am65_mdio_soc_data },
- { .family = "J721S2", .revision = "SR1.0", .data = &am65_mdio_soc_data},
+ { .family = "AM62X", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .data = &am65_mdio_soc_data },
+ { .family = "J721S2", .data = &am65_mdio_soc_data },
{ /* sentinel */ },
};
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9d535ae59626..c1b0d35c8d05 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -93,12 +93,13 @@ static void gelic_card_get_ether_port_status(struct gelic_card *card,
* gelic_descr_get_status -- returns the status of a descriptor
* @descr: descriptor to look at
*
- * returns the status as in the dmac_cmd_status field of the descriptor
+ * returns the status as in the hw_regs.dmac_cmd_status field of the descriptor
*/
static enum gelic_descr_dma_status
gelic_descr_get_status(struct gelic_descr *descr)
{
- return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
+ return be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
+ GELIC_DESCR_DMA_STAT_MASK;
}
static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
@@ -152,15 +153,15 @@ static void gelic_card_enable_rxdmac(struct gelic_card *card)
if (gelic_descr_get_status(card->rx_chain.head) !=
GELIC_DESCR_DMA_CARDOWNED) {
printk(KERN_ERR "%s: status=%x\n", __func__,
- be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
+ be32_to_cpu(card->rx_chain.head->hw_regs.dmac_cmd_status));
printk(KERN_ERR "%s: nextphy=%x\n", __func__,
- be32_to_cpu(card->rx_chain.head->next_descr_addr));
+ be32_to_cpu(card->rx_chain.head->hw_regs.next_descr_addr));
printk(KERN_ERR "%s: head=%p\n", __func__,
card->rx_chain.head);
}
#endif
status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
- card->rx_chain.head->bus_addr, 0);
+ card->rx_chain.head->link.cpu_addr, 0);
if (status)
dev_info(ctodev(card),
"lv1_net_start_rx_dma failed, status=%d\n", status);
@@ -195,8 +196,8 @@ static void gelic_card_disable_rxdmac(struct gelic_card *card)
static void gelic_descr_set_status(struct gelic_descr *descr,
enum gelic_descr_dma_status status)
{
- descr->dmac_cmd_status = cpu_to_be32(status |
- (be32_to_cpu(descr->dmac_cmd_status) &
+ descr->hw_regs.dmac_cmd_status = cpu_to_be32(status |
+ (be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
~GELIC_DESCR_DMA_STAT_MASK));
/*
* dma_cmd_status field is used to indicate whether the descriptor
@@ -224,13 +225,14 @@ static void gelic_card_reset_chain(struct gelic_card *card,
for (descr = start_descr; start_descr != descr->next; descr++) {
gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
- descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
+ descr->hw_regs.next_descr_addr
+ = cpu_to_be32(descr->next->link.cpu_addr);
}
chain->head = start_descr;
chain->tail = (descr - 1);
- (descr - 1)->next_descr_addr = 0;
+ (descr - 1)->hw_regs.next_descr_addr = 0;
}
void gelic_card_up(struct gelic_card *card)
@@ -286,10 +288,12 @@ static void gelic_card_free_chain(struct gelic_card *card,
{
struct gelic_descr *descr;
- for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) {
- dma_unmap_single(ctodev(card), descr->bus_addr,
- GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
- descr->bus_addr = 0;
+ for (descr = descr_in; descr && descr->link.cpu_addr;
+ descr = descr->next) {
+ dma_unmap_single(ctodev(card), descr->link.cpu_addr,
+ descr->link.size, DMA_BIDIRECTIONAL);
+ descr->link.cpu_addr = 0;
+ descr->link.size = 0;
}
}
@@ -317,17 +321,21 @@ static int gelic_card_init_chain(struct gelic_card *card,
/* set up the hardware pointers in each descriptor */
for (i = 0; i < no; i++, descr++) {
- dma_addr_t cpu_addr;
-
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
- cpu_addr = dma_map_single(ctodev(card), descr,
- GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+ descr->link.size = sizeof(struct gelic_hw_regs);
+ descr->link.cpu_addr = dma_map_single(ctodev(card), descr,
+ descr->link.size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ctodev(card), cpu_addr))
- goto iommu_error;
+ if (dma_mapping_error(ctodev(card), descr->link.cpu_addr)) {
+ for (i--, descr--; 0 <= i; i--, descr--) {
+ dma_unmap_single(ctodev(card),
+ descr->link.cpu_addr, descr->link.size,
+ DMA_BIDIRECTIONAL);
+ }
+ return -ENOMEM;
+ }
- descr->bus_addr = cpu_to_be32(cpu_addr);
descr->next = descr + 1;
descr->prev = descr - 1;
}
@@ -338,24 +346,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
/* chain bus addr of hw descriptor */
descr = start_descr;
for (i = 0; i < no; i++, descr++) {
- descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
+ descr->hw_regs.next_descr_addr =
+ cpu_to_be32(descr->next->link.cpu_addr);
}
chain->head = start_descr;
chain->tail = start_descr;
/* do not chain last hw descriptor */
- (descr - 1)->next_descr_addr = 0;
+ (descr - 1)->hw_regs.next_descr_addr = 0;
return 0;
-
-iommu_error:
- for (i--, descr--; 0 <= i; i--, descr--)
- if (descr->bus_addr)
- dma_unmap_single(ctodev(card), descr->bus_addr,
- GELIC_DESCR_SIZE,
- DMA_BIDIRECTIONAL);
- return -ENOMEM;
}
/**
@@ -383,16 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
+ descr->hw_regs.dmac_cmd_status = 0;
+ descr->hw_regs.result_size = 0;
+ descr->hw_regs.valid_size = 0;
+ descr->hw_regs.data_error = 0;
+ descr->hw_regs.payload.dev_addr = 0;
+ descr->hw_regs.payload.size = 0;
+
descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
- descr->buf_addr = 0; /* tell DMAC don't touch memory */
+ descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
return -ENOMEM;
}
- descr->buf_size = cpu_to_be32(rx_skb_size);
- descr->dmac_cmd_status = 0;
- descr->result_size = 0;
- descr->valid_size = 0;
- descr->data_error = 0;
offset = ((unsigned long)descr->skb->data) &
(GELIC_NET_RXBUF_ALIGN - 1);
@@ -401,7 +404,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
/* io-mmu-map the skb */
cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
- descr->buf_addr = cpu_to_be32(cpu_addr);
+ descr->hw_regs.payload.dev_addr = cpu_to_be32(cpu_addr);
if (dma_mapping_error(ctodev(card), cpu_addr)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
@@ -409,10 +412,14 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
"%s:Could not iommu-map rx buffer\n", __func__);
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
return -ENOMEM;
- } else {
- gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
- return 0;
}
+
+ descr->hw_regs.payload.size = cpu_to_be32(GELIC_NET_MAX_FRAME);
+ descr->hw_regs.payload.dev_addr = cpu_to_be32(cpu_addr);
+
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
+
+ return 0;
}
/**
@@ -427,14 +434,15 @@ static void gelic_card_release_rx_chain(struct gelic_card *card)
do {
if (descr->skb) {
dma_unmap_single(ctodev(card),
- be32_to_cpu(descr->buf_addr),
- descr->skb->len,
- DMA_FROM_DEVICE);
- descr->buf_addr = 0;
+ be32_to_cpu(descr->hw_regs.payload.dev_addr),
+ descr->skb->len,
+ DMA_FROM_DEVICE);
+ descr->hw_regs.payload.dev_addr = 0;
+ descr->hw_regs.payload.size = 0;
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
gelic_descr_set_status(descr,
- GELIC_DESCR_DMA_NOT_IN_USE);
+ GELIC_DESCR_DMA_NOT_IN_USE);
}
descr = descr->next;
} while (descr != card->rx_chain.head);
@@ -496,19 +504,20 @@ static void gelic_descr_release_tx(struct gelic_card *card,
{
struct sk_buff *skb = descr->skb;
- BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL));
+ BUG_ON(!(be32_to_cpu(descr->hw_regs.data_status) & GELIC_DESCR_TX_TAIL));
- dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_single(ctodev(card),
+ be32_to_cpu(descr->hw_regs.payload.dev_addr), skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
- descr->buf_addr = 0;
- descr->buf_size = 0;
- descr->next_descr_addr = 0;
- descr->result_size = 0;
- descr->valid_size = 0;
- descr->data_status = 0;
- descr->data_error = 0;
+ descr->hw_regs.payload.dev_addr = 0;
+ descr->hw_regs.payload.size = 0;
+ descr->hw_regs.next_descr_addr = 0;
+ descr->hw_regs.result_size = 0;
+ descr->hw_regs.valid_size = 0;
+ descr->hw_regs.data_status = 0;
+ descr->hw_regs.data_error = 0;
descr->skb = NULL;
/* set descr status */
@@ -701,7 +710,7 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
struct sk_buff *skb)
{
if (skb->ip_summed != CHECKSUM_PARTIAL)
- descr->dmac_cmd_status =
+ descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
else {
@@ -709,19 +718,19 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
* if yes: tcp? udp? */
if (skb->protocol == htons(ETH_P_IP)) {
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- descr->dmac_cmd_status =
+ descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
- descr->dmac_cmd_status =
+ descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
else /*
* the stack should checksum non-tcp and non-udp
* packets on its own: NETIF_F_IP_CSUM
*/
- descr->dmac_cmd_status =
+ descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
}
@@ -789,11 +798,11 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
return -ENOMEM;
}
- descr->buf_addr = cpu_to_be32(buf);
- descr->buf_size = cpu_to_be32(skb->len);
+ descr->hw_regs.payload.dev_addr = cpu_to_be32(buf);
+ descr->hw_regs.payload.size = cpu_to_be32(skb->len);
descr->skb = skb;
- descr->data_status = 0;
- descr->next_descr_addr = 0; /* terminate hw descr */
+ descr->hw_regs.data_status = 0;
+ descr->hw_regs.next_descr_addr = 0; /* terminate hw descr */
gelic_descr_set_tx_cmdstat(descr, skb);
/* bump free descriptor pointer */
@@ -818,7 +827,7 @@ static int gelic_card_kick_txdma(struct gelic_card *card,
if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) {
card->tx_dma_progress = 1;
status = lv1_net_start_tx_dma(bus_id(card), dev_id(card),
- descr->bus_addr, 0);
+ descr->link.cpu_addr, 0);
if (status) {
card->tx_dma_progress = 0;
dev_info(ctodev(card), "lv1_net_start_txdma failed," \
@@ -871,7 +880,8 @@ netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
* link this prepared descriptor to previous one
* to achieve high performance
*/
- descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
+ descr->prev->hw_regs.next_descr_addr =
+ cpu_to_be32(descr->link.cpu_addr);
/*
* as hardware descriptor is modified in the above lines,
* ensure that the hardware sees it
@@ -884,12 +894,12 @@ netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
*/
netdev->stats.tx_dropped++;
/* don't trigger BUG_ON() in gelic_descr_release_tx */
- descr->data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL);
+ descr->hw_regs.data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL);
gelic_descr_release_tx(card, descr);
/* reset head */
card->tx_chain.head = descr;
/* reset hw termination */
- descr->prev->next_descr_addr = 0;
+ descr->prev->hw_regs.next_descr_addr = 0;
dev_info(ctodev(card), "%s: kick failure\n", __func__);
}
@@ -914,21 +924,21 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
struct sk_buff *skb = descr->skb;
u32 data_status, data_error;
- data_status = be32_to_cpu(descr->data_status);
- data_error = be32_to_cpu(descr->data_error);
+ data_status = be32_to_cpu(descr->hw_regs.data_status);
+ data_error = be32_to_cpu(descr->hw_regs.data_error);
/* unmap skb buffer */
- dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
- GELIC_NET_MAX_FRAME,
- DMA_FROM_DEVICE);
-
- skb_put(skb, be32_to_cpu(descr->valid_size)?
- be32_to_cpu(descr->valid_size) :
- be32_to_cpu(descr->result_size));
- if (!descr->valid_size)
+ dma_unmap_single(ctodev(card),
+ be32_to_cpu(descr->hw_regs.payload.dev_addr),
+ be32_to_cpu(descr->hw_regs.payload.size), DMA_FROM_DEVICE);
+
+ skb_put(skb, be32_to_cpu(descr->hw_regs.valid_size)?
+ be32_to_cpu(descr->hw_regs.valid_size) :
+ be32_to_cpu(descr->hw_regs.result_size));
+ if (!descr->hw_regs.valid_size)
dev_info(ctodev(card), "buffer full %x %x %x\n",
- be32_to_cpu(descr->result_size),
- be32_to_cpu(descr->buf_size),
- be32_to_cpu(descr->dmac_cmd_status));
+ be32_to_cpu(descr->hw_regs.result_size),
+ be32_to_cpu(descr->hw_regs.payload.size),
+ be32_to_cpu(descr->hw_regs.dmac_cmd_status));
descr->skb = NULL;
/*
@@ -1039,14 +1049,14 @@ refill:
/* is the current descriptor terminated with next_descr == NULL? */
dmac_chain_ended =
- be32_to_cpu(descr->dmac_cmd_status) &
+ be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
GELIC_DESCR_RX_DMA_CHAIN_END;
/*
* So that always DMAC can see the end
* of the descriptor chain to avoid
* from unwanted DMAC overrun.
*/
- descr->next_descr_addr = 0;
+ descr->hw_regs.next_descr_addr = 0;
/* change the descriptor state: */
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
@@ -1063,7 +1073,8 @@ refill:
/*
* Set this descriptor the end of the chain.
*/
- descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
+ descr->prev->hw_regs.next_descr_addr =
+ cpu_to_be32(descr->link.cpu_addr);
/*
* If dmac chain was met, DMAC stopped.
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 0d98defb011e..f7d7931e51b7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -221,29 +221,35 @@ enum gelic_lv1_phy {
GELIC_LV1_PHY_ETHERNET_0 = 0x0000000000000002L,
};
-/* size of hardware part of gelic descriptor */
-#define GELIC_DESCR_SIZE (32)
-
enum gelic_port_type {
GELIC_PORT_ETHERNET_0 = 0,
GELIC_PORT_WIRELESS = 1,
GELIC_PORT_MAX
};
-struct gelic_descr {
- /* as defined by the hardware */
- __be32 buf_addr;
- __be32 buf_size;
+/* As defined by the gelic hardware device. */
+struct gelic_hw_regs {
+ struct {
+ __be32 dev_addr;
+ __be32 size;
+ } __packed payload;
__be32 next_descr_addr;
__be32 dmac_cmd_status;
__be32 result_size;
__be32 valid_size; /* all zeroes for tx */
__be32 data_status;
__be32 data_error; /* all zeroes for tx */
+} __packed;
+
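+/* Driver-side DMA mapping of a descriptor's hw_regs block; cpu_addr is the
+ * bus address written into the previous descriptor's next_descr_addr when
+ * chaining.
+ */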
+struct gelic_chain_link {
+ dma_addr_t cpu_addr;
+ unsigned int size;
+};
- /* used in the driver */
+struct gelic_descr {
+ struct gelic_hw_regs hw_regs;
+ struct gelic_chain_link link;
struct sk_buff *skb;
- dma_addr_t bus_addr;
struct gelic_descr *next;
struct gelic_descr *prev;
} __attribute__((aligned(32)));
@@ -346,12 +352,6 @@ static inline void *port_priv(struct gelic_port *port)
return port->priv;
}
-#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-void udbg_shutdown_ps3gelic(void);
-#else
-static inline void udbg_shutdown_ps3gelic(void) {}
-#endif
-
int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
/* shared netdev ops */
void gelic_card_up(struct gelic_card *card);
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 23cd610bd376..85cdbdd44fec 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -26,7 +26,7 @@ config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
select LIBWX
- select PHYLIB
+ select PHYLINK
help
This driver supports Wangxun(R) GbE PCI Express family of
adapters.
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index ddc5f6d20b9c..cc3bec42ed8e 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -8,6 +8,7 @@
#include "wx_type.h"
#include "wx_ethtool.h"
#include "wx_hw.h"
+#include "wx_lib.h"
struct wx_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -75,7 +76,7 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -185,3 +186,238 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
}
}
EXPORT_SYMBOL(wx_get_drvinfo);
+
+int wx_nway_reset(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return phylink_ethtool_nway_reset(wx->phylink);
+}
+EXPORT_SYMBOL(wx_nway_reset);
+
+int wx_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(wx->phylink, cmd);
+}
+EXPORT_SYMBOL(wx_get_link_ksettings);
+
+int wx_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(wx->phylink, cmd);
+}
+EXPORT_SYMBOL(wx_set_link_ksettings);
+
+void wx_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ phylink_ethtool_get_pauseparam(wx->phylink, pause);
+}
+EXPORT_SYMBOL(wx_get_pauseparam);
+
+int wx_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return phylink_ethtool_set_pauseparam(wx->phylink, pause);
+}
+EXPORT_SYMBOL(wx_set_pauseparam);
+
+void wx_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ ring->rx_max_pending = WX_MAX_RXD;
+ ring->tx_max_pending = WX_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = wx->rx_ring_count;
+ ring->tx_pending = wx->tx_ring_count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+EXPORT_SYMBOL(wx_get_ringparam);
+
+int wx_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
+ /* only valid if in constant ITR mode */
+ if (wx->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = wx->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ if (wx->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = wx->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_coalesce);
+
+int wx_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u16 tx_itr_param, rx_itr_param;
+ struct wx_q_vector *q_vector;
+ u16 max_eitr;
+ int i;
+
+ if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
+ /* reject Tx specific changes in case of mixed RxTx vectors */
+ if (ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+ }
+
+ if (ec->tx_max_coalesced_frames_irq)
+ wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
+ if (wx->mac.type == wx_mac_sp)
+ max_eitr = WX_SP_MAX_EITR;
+ else
+ max_eitr = WX_EM_MAX_EITR;
+
+ if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
+ (ec->tx_coalesce_usecs > (max_eitr >> 2)))
+ return -EINVAL;
+
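+ /* rx/tx_itr_setting store usecs << 2, the same units written to the ITR
+ * register; e.g. 50 usecs is stored as 200, i.e. WX_20K_ITR (~20000 irqs/sec).
+ */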
+ if (ec->rx_coalesce_usecs > 1)
+ wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ wx->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ if (wx->rx_itr_setting == 1)
+ rx_itr_param = WX_20K_ITR;
+ else
+ rx_itr_param = wx->rx_itr_setting;
+
+ if (ec->tx_coalesce_usecs > 1)
+ wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ wx->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (wx->tx_itr_setting == 1) {
+ if (wx->mac.type == wx_mac_sp)
+ tx_itr_param = WX_12K_ITR;
+ else
+ tx_itr_param = WX_20K_ITR;
+ } else {
+ tx_itr_param = wx->tx_itr_setting;
+ }
+
+ /* mixed Rx/Tx */
+ if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
+ wx->tx_itr_setting = wx->rx_itr_setting;
+
+ for (i = 0; i < wx->num_q_vectors; i++) {
+ q_vector = wx->q_vector[i];
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
+ wx_write_eitr(q_vector);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_coalesce);
+
+static unsigned int wx_max_channels(struct wx *wx)
+{
+ unsigned int max_combined;
+
+ if (!wx->msix_q_entries) {
+ /* We only support one q_vector without MSI-X */
+ max_combined = 1;
+ } else {
+ /* support up to max allowed queues with RSS */
+ if (wx->mac.type == wx_mac_sp)
+ max_combined = 63;
+ else
+ max_combined = 8;
+ }
+
+ return max_combined;
+}
+
+void wx_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ /* report maximum channels */
+ ch->max_combined = wx_max_channels(wx);
+
+ /* report info for other vector */
+ if (wx->msix_q_entries) {
+ ch->max_other = 1;
+ ch->other_count = 1;
+ }
+
+ /* record RSS queues */
+ ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
+}
+EXPORT_SYMBOL(wx_get_channels);
+
+int wx_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ unsigned int count = ch->combined_count;
+ struct wx *wx = netdev_priv(dev);
+
+ /* verify other_count has not changed */
+ if (ch->other_count != 1)
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > wx_max_channels(wx))
+ return -EINVAL;
+
+ wx->ring_feature[RING_F_RSS].limit = count;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_channels);
+
+u32 wx_get_msglevel(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return wx->msg_enable;
+}
+EXPORT_SYMBOL(wx_get_msglevel);
+
+void wx_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ wx->msg_enable = data;
+}
+EXPORT_SYMBOL(wx_set_msglevel);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 16d1a09369a6..600c3b597d1a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -13,4 +13,31 @@ void wx_get_mac_stats(struct net_device *netdev,
void wx_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
+int wx_nway_reset(struct net_device *netdev);
+int wx_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
+int wx_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd);
+void wx_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int wx_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+void wx_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack);
+int wx_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
+int wx_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
+void wx_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch);
+int wx_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch);
+u32 wx_get_msglevel(struct net_device *netdev);
+void wx_set_msglevel(struct net_device *netdev, u32 data);
#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 533e912af089..1db754615cca 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -149,9 +149,9 @@ void wx_irq_disable(struct wx *wx)
int vector;
for (vector = 0; vector < wx->num_q_vectors; vector++)
- synchronize_irq(wx->msix_entries[vector].vector);
+ synchronize_irq(wx->msix_q_entries[vector].vector);
- synchronize_irq(wx->msix_entries[vector].vector);
+ synchronize_irq(wx->msix_entry->vector);
} else {
synchronize_irq(pdev->irq);
}
@@ -1158,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx)
wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}
+#define WX_ETH_FRAMING 20
+
+/**
+ * wx_hpbthresh - calculate high water mark for flow control
+ *
+ * @wx: board private structure to calculate for
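+ *
+ * Rough worked example (using the WX_DV()/WX_BT2KB() macros): a 1500 byte MTU
+ * gives a 1538 byte max frame, a delay value of roughly 122700 bit times and
+ * thus roughly 15 KB of headroom subtracted from the Rx packet buffer size.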
+ **/
+static int wx_hpbthresh(struct wx *wx)
+{
+ struct net_device *dev = wx->netdev;
+ int link, tc, kb, marker;
+ u32 dv_id, rx_pba;
+
+ /* Calculate max LAN frame size */
+ link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING;
+ tc = link;
+
+ /* Calculate delay value for device */
+ dv_id = WX_DV(link, tc);
+
+ /* Delay value is calculated in bit times; convert to KB */
+ kb = WX_BT2KB(dv_id);
+ rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
+
+ marker = rx_pba - kb;
+
+ /* It is possible that the packet buffer is not large enough
+ * to provide required headroom. In this case throw an error
+ * to the user and do the best we can.
+ */
+ if (marker < 0) {
+ dev_warn(&wx->pdev->dev,
+ "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n");
+ marker = tc + 1;
+ }
+
+ return marker;
+}
+
+/**
+ * wx_lpbthresh - calculate low water mark for flow control
+ *
+ * @wx: board private structure to calculate for
+ **/
+static int wx_lpbthresh(struct wx *wx)
+{
+ struct net_device *dev = wx->netdev;
+ u32 dv_id;
+ int tc;
+
+ /* Calculate max LAN frame size */
+ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Calculate delay value for device */
+ dv_id = WX_LOW_DV(tc);
+
+ /* Delay value is calculated in bit times; convert to KB */
+ return WX_BT2KB(dv_id);
+}
+
+/**
+ * wx_pbthresh_setup - calculate and setup high low water marks
+ *
+ * @wx: board private structure to calculate for
+ **/
+static void wx_pbthresh_setup(struct wx *wx)
+{
+ wx->fc.high_water = wx_hpbthresh(wx);
+ wx->fc.low_water = wx_lpbthresh(wx);
+
+ /* Low water marks must not be larger than high water marks */
+ if (wx->fc.low_water > wx->fc.high_water)
+ wx->fc.low_water = 0;
+}
+
static void wx_configure_port(struct wx *wx)
{
u32 value, i;
@@ -1522,6 +1597,72 @@ static void wx_restore_vlan(struct wx *wx)
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}
+static void wx_store_reta(struct wx *wx)
+{
+ u8 *indir_tbl = wx->rss_indir_tbl;
+ u32 reta = 0;
+ u32 i;
+
+ /* Fill out the redirection table as follows:
+ * - 8 bit wide entries containing 4 bit RSS index
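+ * e.g. indir_tbl[0..3] = {0, 1, 2, 3} is packed into RSSTBL(0) as 0x03020100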
+ */
+ for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
+ reta |= indir_tbl[i] << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
+ wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
+ reta = 0;
+ }
+ }
+}
+
+static void wx_setup_reta(struct wx *wx)
+{
+ u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 random_key_size = WX_RSS_KEY_SIZE / 4;
+ u32 i, j;
+
+ /* Fill out hash function seeds */
+ for (i = 0; i < random_key_size; i++)
+ wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
+
+ /* Fill out redirection table */
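+ /* e.g. with rss_i == 4 the 128 entries repeat 0, 1, 2, 3, 0, 1, ... */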
+ memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
+
+ for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+
+ wx->rss_indir_tbl[i] = j;
+ }
+
+ wx_store_reta(wx);
+}
+
+static void wx_setup_mrqc(struct wx *wx)
+{
+ u32 rss_field = 0;
+
+ /* Disable indicating checksum in descriptor, enables RSS hash */
+ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
+
+ /* Perform hash on these packet types */
+ rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
+ WX_RDB_RA_CTL_RSS_IPV4_TCP |
+ WX_RDB_RA_CTL_RSS_IPV4_UDP |
+ WX_RDB_RA_CTL_RSS_IPV6 |
+ WX_RDB_RA_CTL_RSS_IPV6_TCP |
+ WX_RDB_RA_CTL_RSS_IPV6_UDP;
+
+ netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
+
+ wx_setup_reta(wx);
+
+ if (wx->rss_enabled)
+ rss_field |= WX_RDB_RA_CTL_RSS_EN;
+
+ wr32(wx, WX_RDB_RA_CTL, rss_field);
+}
+
/**
* wx_configure_rx - Configure Receive Unit after Reset
* @wx: pointer to private structure
@@ -1554,6 +1695,8 @@ void wx_configure_rx(struct wx *wx)
wr32(wx, WX_PSR_CTL, psrctl);
}
+ wx_setup_mrqc(wx);
+
/* set_rx_buffer_len must be called before ring initialization */
wx_set_rx_buffer_len(wx);
@@ -1584,6 +1727,7 @@ static void wx_configure_isb(struct wx *wx)
void wx_configure(struct wx *wx)
{
wx_set_rxpba(wx);
+ wx_pbthresh_setup(wx);
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
@@ -1750,6 +1894,28 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
}
EXPORT_SYMBOL(wx_get_pcie_msix_counts);
+/**
+ * wx_init_rss_key - Initialize wx RSS key
+ * @wx: device handle
+ *
+ * Allocates and initializes the RSS key if it is not allocated.
+ **/
+static int wx_init_rss_key(struct wx *wx)
+{
+ u32 *rss_key;
+
+ if (!wx->rss_key) {
+ rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
+ if (unlikely(!rss_key))
+ return -ENOMEM;
+
+ netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
+ wx->rss_key = rss_key;
+ }
+
+ return 0;
+}
+
int wx_sw_init(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
@@ -1777,14 +1943,23 @@ int wx_sw_init(struct wx *wx)
wx->subsystem_device_id = swab16((u16)ssid);
}
+ err = wx_init_rss_key(wx);
+ if (err < 0) {
+ wx_err(wx, "rss key allocation failed\n");
+ return err;
+ }
+
wx->mac_table = kcalloc(wx->mac.num_rar_entries,
sizeof(struct wx_mac_addr),
GFP_KERNEL);
if (!wx->mac_table) {
wx_err(wx, "mac_table allocation failed\n");
+ kfree(wx->rss_key);
return -ENOMEM;
}
+ wx->msix_in_use = false;
+
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
@@ -2003,6 +2178,102 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
}
EXPORT_SYMBOL(wx_vlan_rx_kill_vid);
+static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
+{
+ u16 reg_idx = ring->reg_idx;
+ u32 srrctl;
+
+ srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ srrctl |= WX_PX_RR_CFG_DROP_EN;
+
+ wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
+}
+
+static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
+{
+ u16 reg_idx = ring->reg_idx;
+ u32 srrctl;
+
+ srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ srrctl &= ~WX_PX_RR_CFG_DROP_EN;
+
+ wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
+}
+
+int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
+{
+ u16 pause_time = WX_DEFAULT_FCPAUSE;
+ u32 mflcn_reg, fccfg_reg, reg;
+ u32 fcrtl, fcrth;
+ int i;
+
+ /* Low water mark of zero causes XOFF floods */
+ if (tx_pause && wx->fc.high_water) {
+ if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
+ wx_err(wx, "Invalid water mark configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
+ mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE;
+
+ fccfg_reg = rd32(wx, WX_RDB_RFCC);
+ fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X;
+
+ if (rx_pause)
+ mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE;
+ if (tx_pause)
+ fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X;
+
+ /* Set 802.3x based flow control settings. */
+ wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
+ wr32(wx, WX_RDB_RFCC, fccfg_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ if (tx_pause && wx->fc.high_water) {
+ fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
+ wr32(wx, WX_RDB_RFCL, fcrtl);
+ fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
+ } else {
+ wr32(wx, WX_RDB_RFCL, 0);
+ /* In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
+ }
+
+ wr32(wx, WX_RDB_RFCH, fcrth);
+
+ /* Configure pause time */
+ reg = pause_time * 0x00010001;
+ wr32(wx, WX_RDB_RFCV, reg);
+
+ /* Configure flow control refresh threshold value */
+ wr32(wx, WX_RDB_RFCRT, pause_time / 2);
+
+ /* We should set the drop enable bit if:
+ * Number of Rx queues > 1 and flow control is disabled
+ *
+ * This allows us to avoid head of line blocking for security
+ * and performance reasons.
+ */
+ if (wx->num_rx_queues > 1 && !tx_pause) {
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_enable_rx_drop(wx, wx->rx_ring[i]);
+ } else {
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_disable_rx_drop(wx, wx->rx_ring[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_fc_enable);
+
/**
* wx_update_stats - Update the board statistics counters.
* @wx: board private structure
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 12c20a7c364d..9e219fa717a2 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -41,6 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
void wx_update_stats(struct wx *wx);
void wx_clear_hw_cntrs(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 347d3cec02a3..8706223a6e5a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1568,8 +1568,14 @@ EXPORT_SYMBOL(wx_napi_disable_all);
**/
static void wx_set_rss_queues(struct wx *wx)
{
- wx->num_rx_queues = wx->mac.max_rx_queues;
- wx->num_tx_queues = wx->mac.max_tx_queues;
+ struct wx_ring_feature *f;
+
+ /* set mask for 16 queue limit of RSS */
+ f = &wx->ring_feature[RING_F_RSS];
+ f->indices = f->limit;
+
+ wx->num_rx_queues = f->limit;
+ wx->num_tx_queues = f->limit;
}
static void wx_set_num_queues(struct wx *wx)
@@ -1595,35 +1601,51 @@ static int wx_acquire_msix_vectors(struct wx *wx)
struct irq_affinity affd = {0, };
int nvecs, i;
- nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
+ /* We start by asking for one vector per queue pair */
+ nvecs = max(wx->num_rx_queues, wx->num_tx_queues);
+ nvecs = min_t(int, nvecs, num_online_cpus());
+ nvecs = min_t(int, nvecs, wx->mac.max_msix_vectors);
- wx->msix_entries = kcalloc(nvecs,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!wx->msix_entries)
+ wx->msix_q_entries = kcalloc(nvecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_q_entries)
return -ENOMEM;
+ /* One for non-queue interrupts */
+ nvecs += 1;
+
+ if (!wx->msix_in_use) {
+ wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entry) {
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ return -ENOMEM;
+ }
+ }
+
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
nvecs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&affd);
if (nvecs < 0) {
wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
- kfree(wx->msix_entries);
- wx->msix_entries = NULL;
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
return nvecs;
}
+ wx->msix_entry->entry = 0;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
+ nvecs -= 1;
for (i = 0; i < nvecs; i++) {
- wx->msix_entries[i].entry = i;
- wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
+ wx->msix_q_entries[i].entry = i;
+ wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
}
- /* one for msix_other */
- nvecs -= 1;
wx->num_q_vectors = nvecs;
- wx->num_rx_queues = nvecs;
- wx->num_tx_queues = nvecs;
return 0;
}
@@ -1645,9 +1667,11 @@ static int wx_set_interrupt_capability(struct wx *wx)
if (ret == 0 || (ret == -ENOMEM))
return ret;
- wx->num_rx_queues = 1;
- wx->num_tx_queues = 1;
- wx->num_q_vectors = 1;
+ /* Disable RSS */
+ dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
+ wx->ring_feature[RING_F_RSS].limit = 1;
+
+ wx_set_num_queues(wx);
/* minimum one for queue, one for misc */
nvecs = 1;
@@ -1905,8 +1929,12 @@ void wx_reset_interrupt_capability(struct wx *wx)
return;
if (pdev->msix_enabled) {
- kfree(wx->msix_entries);
- wx->msix_entries = NULL;
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ if (!wx->msix_in_use) {
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
+ }
}
pci_free_irq_vectors(wx->pdev);
}
@@ -1978,7 +2006,7 @@ void wx_free_irq(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_entries[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
/* free only the irqs that were actually requested */
if (!q_vector->rx.ring && !q_vector->tx.ring)
@@ -1988,7 +2016,7 @@ void wx_free_irq(struct wx *wx)
}
if (wx->mac.type == wx_mac_em)
- free_irq(wx->msix_entries[vector].vector, wx);
+ free_irq(wx->msix_entry->vector, wx);
}
EXPORT_SYMBOL(wx_free_irq);
@@ -2065,6 +2093,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
+ msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2082,7 +2111,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
* when it needs to update EITR registers at runtime. Hardware
* specific quirks/differences are taken care of here.
*/
-static void wx_write_eitr(struct wx_q_vector *q_vector)
+void wx_write_eitr(struct wx_q_vector *q_vector)
{
struct wx *wx = q_vector->wx;
int v_idx = q_vector->v_idx;
@@ -2095,7 +2124,7 @@ static void wx_write_eitr(struct wx_q_vector *q_vector)
itr_reg |= WX_PX_ITR_CNT_WDIS;
- wr32(wx, WX_PX_ITR(v_idx), itr_reg);
+ wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
}
/**
@@ -2141,9 +2170,9 @@ void wx_configure_vectors(struct wx *wx)
wx_write_eitr(q_vector);
}
- wx_set_ivar(wx, -1, 0, v_idx);
+ wx_set_ivar(wx, -1, 0, 0);
if (pdev->msix_enabled)
- wr32(wx, WX_PX_ITR(v_idx), 1950);
+ wr32(wx, WX_PX_ITR(0), 1950);
}
EXPORT_SYMBOL(wx_configure_vectors);
@@ -2656,11 +2685,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
netdev_features_t changed = netdev->features ^ features;
struct wx *wx = netdev_priv(netdev);
- if (changed & NETIF_F_RXHASH)
+ if (features & NETIF_F_RXHASH) {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
WX_RDB_RA_CTL_RSS_EN);
- else
+ wx->rss_enabled = true;
+ } else {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
+ wx->rss_enabled = false;
+ }
if (changed &
(NETIF_F_HW_VLAN_CTAG_RX |
@@ -2671,4 +2703,71 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
}
EXPORT_SYMBOL(wx_set_features);
+void wx_set_ring(struct wx *wx, u32 new_tx_count,
+ u32 new_rx_count, struct wx_ring *temp_ring)
+{
+ int i, err = 0;
+
+ /* Setup new Tx resources and free the old Tx resources in that order.
+ * We can then assign the new resources to the rings via a memcpy.
+ * The advantage to this approach is that we are guaranteed to still
+ * have resources even in the case of an allocation failure.
+ */
+ if (new_tx_count != wx->tx_ring_count) {
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ memcpy(&temp_ring[i], wx->tx_ring[i],
+ sizeof(struct wx_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = wx_setup_tx_resources(&temp_ring[i]);
+ if (err) {
+ wx_err(wx, "setup new tx resources failed, keep using the old config\n");
+ while (i) {
+ i--;
+ wx_free_tx_resources(&temp_ring[i]);
+ }
+ return;
+ }
+ }
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ wx_free_tx_resources(wx->tx_ring[i]);
+
+ memcpy(wx->tx_ring[i], &temp_ring[i],
+ sizeof(struct wx_ring));
+ }
+
+ wx->tx_ring_count = new_tx_count;
+ }
+
+ /* Repeat the process for the Rx rings if needed */
+ if (new_rx_count != wx->rx_ring_count) {
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], wx->rx_ring[i],
+ sizeof(struct wx_ring));
+
+ temp_ring[i].count = new_rx_count;
+ err = wx_setup_rx_resources(&temp_ring[i]);
+ if (err) {
+ wx_err(wx, "setup new rx resources failed, keep using the old config\n");
+ while (i) {
+ i--;
+ wx_free_rx_resources(&temp_ring[i]);
+ }
+ return;
+ }
+ }
+
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ wx_free_rx_resources(wx->rx_ring[i]);
+ memcpy(wx->rx_ring[i], &temp_ring[i],
+ sizeof(struct wx_ring));
+ }
+
+ wx->rx_ring_count = new_rx_count;
+ }
+}
+EXPORT_SYMBOL(wx_set_ring);
+
+MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index df1f4a5951f0..ec909e876720 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx);
int wx_setup_isb_resources(struct wx *wx);
void wx_free_isb_resources(struct wx *wx);
u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
+void wx_write_eitr(struct wx_q_vector *q_vector);
void wx_configure_vectors(struct wx *wx);
void wx_clean_all_rx_rings(struct wx *wx);
void wx_clean_all_tx_rings(struct wx *wx);
@@ -29,5 +30,7 @@ int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features);
+void wx_set_ring(struct wx *wx, u32 new_tx_count,
+ u32 new_rx_count, struct wx_ring *temp_ring);
#endif /* _NGBE_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 83f9bb7b3c22..b4dc4f341117 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
#include <net/ip.h>
#define WX_NCSI_SUP 0x8000
@@ -130,6 +131,15 @@
#define WX_RDB_PFCMACDAH 0x19214
#define WX_RDB_LXOFFTXC 0x19218
#define WX_RDB_LXONTXC 0x1921C
+/* Flow Control Registers */
+#define WX_RDB_RFCV 0x19200
+#define WX_RDB_RFCL 0x19220
+#define WX_RDB_RFCL_XONE BIT(31)
+#define WX_RDB_RFCH 0x19260
+#define WX_RDB_RFCH_XOFFE BIT(31)
+#define WX_RDB_RFCRT 0x192A0
+#define WX_RDB_RFCC 0x192A4
+#define WX_RDB_RFCC_RFCE_802_3X BIT(3)
/* ring assignment */
#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4))
#define WX_RDB_PL_CFG_L4HDR BIT(1)
@@ -137,8 +147,16 @@
#define WX_RDB_PL_CFG_L2HDR BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
+#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4))
+#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4))
#define WX_RDB_RA_CTL 0x194F4
#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */
+#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16)
+#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17)
+#define WX_RDB_RA_CTL_RSS_IPV6 BIT(20)
+#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
+#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
+#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
/******************************* PSR Registers *******************************/
/* psr control */
@@ -305,6 +323,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
#define WX_7K_ITR 595
#define WX_12K_ITR 336
+#define WX_20K_ITR 200
#define WX_SP_MAX_EITR 0x00000FF8U
#define WX_EM_MAX_EITR 0x00007FFCU
@@ -330,6 +349,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
#define WX_PX_RR_CFG_VLAN BIT(31)
+#define WX_PX_RR_CFG_DROP_EN BIT(30)
#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
#define WX_PX_RR_CFG_RR_THER_SHIFT 16
#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
@@ -367,8 +387,46 @@ enum WX_MSCA_CMD_value {
#define WX_MAC_STATE_MODIFIED 0x2
#define WX_MAC_STATE_IN_USE 0x4
+/* BitTimes (BT) conversion */
+#define WX_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
+#define WX_B2BT(BT) ((BT) * 8)
+
+/* Calculate Delay to respond to PFC */
+#define WX_PFC_D 672
+/* Calculate Cable Delay */
+#define WX_CABLE_DC 5556 /* Delay Copper */
+/* Calculate Delay incurred from higher layer */
+#define WX_HD 6144
+
+/* Calculate Interface Delay */
+#define WX_PHY_D 12800
+#define WX_MAC_D 4096
+#define WX_XAUI_D (2 * 1024)
+#define WX_ID (WX_MAC_D + WX_XAUI_D + WX_PHY_D)
+/* Calculate PCI Bus delay for low thresholds */
+#define WX_PCI_DELAY 10000
+
+/* Calculate delay value in bit times */
+#define WX_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * (WX_B2BT(_max_frame_link) + WX_PFC_D + \
+ (2 * WX_CABLE_DC) + (2 * WX_ID) + WX_HD) / 25 + 1) + \
+ 2 * WX_B2BT(_max_frame_tc))
+
+/* Calculate low threshold delay values */
+#define WX_LOW_DV(_max_frame_tc) \
+ (2 * (2 * WX_B2BT(_max_frame_tc) + (36 * WX_PCI_DELAY / 25) + 1))
+
+/* flow control */
+#define WX_DEFAULT_FCPAUSE 0xFFFF
+
#define WX_MAX_RXD 8192
#define WX_MAX_TXD 8192
+#define WX_MIN_RXD 128
+#define WX_MIN_TXD 128
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
#define VMDQ_P(p) p
@@ -871,6 +929,19 @@ struct wx_q_vector {
struct wx_ring ring[] ____cacheline_internodealigned_in_smp;
};
+struct wx_ring_feature {
+ u16 limit; /* upper limit on feature indices */
+ u16 indices; /* current value of indices */
+ u16 mask; /* Mask used for feature to ring mapping */
+ u16 offset; /* offset to start of feature */
+};
+
+enum wx_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_RSS,
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
enum wx_isb_idx {
WX_ISB_HEADER,
WX_ISB_MISC,
@@ -879,6 +950,11 @@ enum wx_isb_idx {
WX_ISB_MAX
};
+struct wx_fc_info {
+ u32 high_water; /* Flow Ctrl High-water */
+ u32 low_water; /* Flow Ctrl Low-water */
+};
+
/* Statistics counters collected by the MAC */
struct wx_hw_stats {
u64 gprc;
@@ -919,6 +995,7 @@ struct wx {
enum sp_media_type media_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
+ struct wx_fc_info fc;
struct wx_mac_addr *mac_table;
u16 device_id;
u16 vendor_id;
@@ -939,6 +1016,8 @@ struct wx {
int speed;
int duplex;
struct phy_device *phydev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
bool wol_hw_supported;
bool ncsi_enabled;
@@ -966,7 +1045,10 @@ struct wx {
struct wx_q_vector *q_vector[64];
unsigned int queues_per_pool;
- struct msix_entry *msix_entries;
+ struct msix_entry *msix_q_entries;
+ struct msix_entry *msix_entry;
+ bool msix_in_use;
+ struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */
dma_addr_t isb_dma;
@@ -974,8 +1056,9 @@ struct wx {
u32 isb_tag[WX_ISB_MAX];
#define WX_MAX_RETA_ENTRIES 128
+#define WX_RSS_INDIR_TBL_MAX 64
u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
-
+ bool rss_enabled;
#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
u32 wol;
@@ -992,7 +1075,7 @@ struct wx {
};
#define WX_INTR_ALL (~0ULL)
-#define WX_INTR_Q(i) BIT(i)
+#define WX_INTR_Q(i) BIT((i) + 1)
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@@ -1044,4 +1127,9 @@ rd64(struct wx *wx, u32 reg)
#define wx_dbg(wx, fmt, arg...) \
dev_dbg(&(wx)->pdev->dev, fmt, ##arg)
+static inline struct wx *phylink_to_wx(struct phylink_config *config)
+{
+ return container_of(config, struct wx, phylink_config);
+}
+
#endif /* _WX_TYPE_H_ */
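The delay and BitTimes macros above are the inputs for the new wx_fc_info watermarks; the code that actually programs them lives in libwx sources outside this hunk, so the helpers below are only a sketch of how WX_DV(), WX_LOW_DV() and WX_BT2KB() are meant to combine (the example_* names are hypothetical):

/* Sketch: delay in bit times, rounded up to KB-sized packet-buffer units. */
static u32 example_fc_high_water(u32 max_frame)
{
	return WX_BT2KB(WX_DV(max_frame, max_frame));
}

static u32 example_fc_low_water(u32 max_frame)
{
	return WX_BT2KB(WX_LOW_DV(max_frame));
}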
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index afbdf6919071..786a652ae64f 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -7,7 +7,10 @@
#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
#include "ngbe_ethtool.h"
+#include "ngbe_type.h"
static void ngbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
@@ -41,12 +44,75 @@ static int ngbe_set_wol(struct net_device *netdev,
return 0;
}
+static int ngbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+ struct wx_ring *temp_ring;
+ int i;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ if (new_tx_count == wx->tx_ring_count &&
+ new_rx_count == wx->rx_ring_count)
+ return 0;
+
+ if (!netif_running(wx->netdev)) {
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx->rx_ring[i]->count = new_rx_count;
+ wx->tx_ring_count = new_tx_count;
+ wx->rx_ring_count = new_rx_count;
+
+ return 0;
+ }
+
+ /* allocate temporary buffer to store rings in */
+ i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
+ temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
+ if (!temp_ring)
+ return -ENOMEM;
+
+ ngbe_down(wx);
+
+ wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
+ kvfree(temp_ring);
+
+ wx_configure(wx);
+ ngbe_up(wx);
+
+ return 0;
+}
+
+static int ngbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ int err;
+
+ err = wx_set_channels(dev, ch);
+ if (err < 0)
+ return err;
+
+ /* use setup TC to update any traffic class queue mapping */
+ return ngbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
static const struct ethtool_ops ngbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
- .nway_reset = phy_ethtool_nway_reset,
+ .get_link_ksettings = wx_get_link_ksettings,
+ .set_link_ksettings = wx_set_link_ksettings,
+ .nway_reset = wx_nway_reset,
.get_wol = ngbe_get_wol,
.set_wol = ngbe_set_wol,
.get_sset_count = wx_get_sset_count,
@@ -54,6 +120,16 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.get_ethtool_stats = wx_get_ethtool_stats,
.get_eth_mac_stats = wx_get_mac_stats,
.get_pause_stats = wx_get_pause_stats,
+ .get_pauseparam = wx_get_pauseparam,
+ .set_pauseparam = wx_set_pauseparam,
+ .get_ringparam = wx_get_ringparam,
+ .set_ringparam = ngbe_set_ringparam,
+ .get_coalesce = wx_get_coalesce,
+ .set_coalesce = wx_set_coalesce,
+ .get_channels = wx_get_channels,
+ .set_channels = ngbe_set_channels,
+ .get_msglevel = wx_get_msglevel,
+ .set_msglevel = wx_set_msglevel,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
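For reference, the clamp-and-align step shared by ngbe_set_ringparam() above and txgbe_set_ringparam() later in this diff behaves as follows; example_ring_size() is an illustrative helper, not part of the patch:

/* Same arithmetic as the set_ringparam handlers. */
static u32 example_ring_size(u32 requested)
{
	u32 n = clamp_t(u32, requested, WX_MIN_TXD, WX_MAX_TXD);	/* 128..8192 */

	return ALIGN(n, WX_REQ_TX_DESCRIPTOR_MULTIPLE);		/* round up to a multiple of 8 */
}

/* example_ring_size(100)  -> 128
 * example_ring_size(1001) -> 1008
 * example_ring_size(9000) -> 8192
 */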
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 8db804543e66..fdd6b4f70b7a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -80,28 +80,6 @@ static void ngbe_init_type_code(struct wx *wx)
}
/**
- * ngbe_init_rss_key - Initialize wx RSS key
- * @wx: device handle
- *
- * Allocates and initializes the RSS key if it is not allocated.
- **/
-static inline int ngbe_init_rss_key(struct wx *wx)
-{
- u32 *rss_key;
-
- if (!wx->rss_key) {
- rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
- if (unlikely(!rss_key))
- return -ENOMEM;
-
- netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
- wx->rss_key = rss_key;
- }
-
- return 0;
-}
-
-/**
* ngbe_sw_init - Initialize general software structures
* @wx: board private structure to initialize
**/
@@ -134,8 +112,9 @@ static int ngbe_sw_init(struct wx *wx)
dev_err(&pdev->dev, "Do not support MSI-X\n");
wx->mac.max_msix_vectors = msix_count;
- if (ngbe_init_rss_key(wx))
- return -ENOMEM;
+ wx->ring_feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES,
+ num_online_cpus());
+ wx->rss_enabled = true;
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
@@ -175,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
if (queues)
wx_intr_enable(wx, NGBE_INTR_ALL);
else
- wx_intr_enable(wx, NGBE_INTR_MISC(wx));
+ wx_intr_enable(wx, NGBE_INTR_MISC);
}
/**
@@ -241,7 +220,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_entries[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring)
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -259,7 +238,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
}
}
- err = request_irq(wx->msix_entries[vector].vector,
+ err = request_irq(wx->msix_entry->vector,
ngbe_msix_other, 0, netdev->name, wx);
if (err) {
@@ -272,7 +251,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
free_queue_irqs:
while (vector) {
vector--;
- free_irq(wx->msix_entries[vector].vector,
+ free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
wx_reset_interrupt_capability(wx);
@@ -334,15 +313,15 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
-static void ngbe_down(struct wx *wx)
+void ngbe_down(struct wx *wx)
{
- phy_stop(wx->phydev);
+ phylink_stop(wx->phylink);
ngbe_disable_device(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
-static void ngbe_up(struct wx *wx)
+void ngbe_up(struct wx *wx)
{
wx_configure_vectors(wx);
@@ -359,7 +338,7 @@ static void ngbe_up(struct wx *wx)
if (wx->gpio_ctrl)
ngbe_sfp_modules_txrx_powerctl(wx, true);
- phy_start(wx->phydev);
+ phylink_start(wx->phylink);
}
/**
@@ -388,7 +367,7 @@ static int ngbe_open(struct net_device *netdev)
if (err)
goto err_free_resources;
- err = ngbe_phy_connect(wx);
+ err = phylink_connect_phy(wx->phylink, wx->phydev);
if (err)
goto err_free_irq;
@@ -404,7 +383,7 @@ static int ngbe_open(struct net_device *netdev)
return 0;
err_dis_phy:
- phy_disconnect(wx->phydev);
+ phylink_disconnect_phy(wx->phylink);
err_free_irq:
wx_free_irq(wx);
err_free_resources:
@@ -430,7 +409,7 @@ static int ngbe_close(struct net_device *netdev)
ngbe_down(wx);
wx_free_irq(wx);
wx_free_resources(wx);
- phy_disconnect(wx->phydev);
+ phylink_disconnect_phy(wx->phylink);
wx_control_hw(wx, false);
return 0;
@@ -480,6 +459,39 @@ static void ngbe_shutdown(struct pci_dev *pdev)
}
}
+/**
+ * ngbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ngbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ngbe_close(dev);
+
+ wx_clear_interrupt_scheme(wx);
+
+ if (tc)
+ netdev_set_num_tc(dev, tc);
+ else
+ netdev_reset_tc(dev);
+
+ wx_init_interrupt_scheme(wx);
+
+ if (netif_running(dev))
+ ngbe_open(dev);
+
+ return 0;
+}
+
static const struct net_device_ops ngbe_netdev_ops = {
.ndo_open = ngbe_open,
.ndo_stop = ngbe_close,
@@ -582,6 +594,7 @@ static int ngbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
@@ -680,6 +693,7 @@ static int ngbe_probe(struct pci_dev *pdev,
return 0;
err_register:
+ phylink_destroy(wx->phylink);
wx_control_hw(wx, false);
err_clear_interrupt_scheme:
wx_clear_interrupt_scheme(wx);
@@ -709,9 +723,11 @@ static void ngbe_remove(struct pci_dev *pdev)
netdev = wx->netdev;
unregister_netdev(netdev);
+ phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+ kfree(wx->rss_key);
kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index 6302ecca71bb..ec54b18c5fe7 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -56,22 +56,28 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr,
return ret;
}
-static void ngbe_handle_link_change(struct net_device *dev)
+static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct wx *wx = netdev_priv(dev);
- struct phy_device *phydev;
+}
+
+static void ngbe_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void ngbe_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct wx *wx = phylink_to_wx(config);
u32 lan_speed, reg;
- phydev = wx->phydev;
- if (!(wx->link != phydev->link ||
- wx->speed != phydev->speed ||
- wx->duplex != phydev->duplex))
- return;
+ wx_fc_enable(wx, tx_pause, rx_pause);
- wx->link = phydev->link;
- wx->speed = phydev->speed;
- wx->duplex = phydev->duplex;
- switch (phydev->speed) {
+ switch (speed) {
case SPEED_10:
lan_speed = 0;
break;
@@ -83,54 +89,51 @@ static void ngbe_handle_link_change(struct net_device *dev)
lan_speed = 2;
break;
}
+
wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed);
- if (phydev->link) {
- reg = rd32(wx, WX_MAC_TX_CFG);
- reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
- reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
- wr32(wx, WX_MAC_TX_CFG, reg);
- /* Re configure MAC RX */
- reg = rd32(wx, WX_MAC_RX_CFG);
- wr32(wx, WX_MAC_RX_CFG, reg);
- wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
- reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
- wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
- }
- phy_print_status(phydev);
+ reg = rd32(wx, WX_MAC_TX_CFG);
+ reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
+ reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
+ wr32(wx, WX_MAC_TX_CFG, reg);
+
+ /* Reconfigure MAC Rx */
+ reg = rd32(wx, WX_MAC_RX_CFG);
+ wr32(wx, WX_MAC_RX_CFG, reg);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
}
-int ngbe_phy_connect(struct wx *wx)
+static const struct phylink_mac_ops ngbe_mac_ops = {
+ .mac_config = ngbe_mac_config,
+ .mac_link_down = ngbe_mac_link_down,
+ .mac_link_up = ngbe_mac_link_up,
+};
+
+static int ngbe_phylink_init(struct wx *wx)
{
- int ret;
+ struct phylink_config *config;
+ phy_interface_t phy_mode;
+ struct phylink *phylink;
- ret = phy_connect_direct(wx->netdev,
- wx->phydev,
- ngbe_handle_link_change,
- PHY_INTERFACE_MODE_RGMII_ID);
- if (ret) {
- wx_err(wx, "PHY connect failed.\n");
- return ret;
- }
+ config = &wx->phylink_config;
+ config->dev = &wx->netdev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_1000FD | MAC_100FD | MAC_10FD |
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ config->mac_managed_pm = true;
- return 0;
-}
+ phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces);
-static void ngbe_phy_fixup(struct wx *wx)
-{
- struct phy_device *phydev = wx->phydev;
- struct ethtool_eee eee;
-
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- phydev->mac_managed_pm = true;
- if (wx->mac_type != em_mac_type_mdi)
- return;
- /* disable EEE, internal phy does not support eee */
- memset(&eee, 0, sizeof(eee));
- phy_ethtool_set_eee(phydev, &eee);
+ phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ wx->phylink = phylink;
+
+ return 0;
}
int ngbe_mdio_init(struct wx *wx)
@@ -165,11 +168,16 @@ int ngbe_mdio_init(struct wx *wx)
return -ENODEV;
phy_attached_info(wx->phydev);
- ngbe_phy_fixup(wx);
wx->link = 0;
wx->speed = 0;
wx->duplex = 0;
+ ret = ngbe_phylink_init(wx);
+ if (ret) {
+ wx_err(wx, "failed to init phylink: %d\n", ret);
+ return ret;
+ }
+
return 0;
}
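Taken together, the ngbe changes in this diff hand PHY management over to phylink; the outline below only summarizes calls that appear in the hunks above and in ngbe_main.c earlier in this diff:

/*
 * ngbe_mdio_init()  -> ngbe_phylink_init() -> phylink_create()
 * ngbe_open()       -> phylink_connect_phy(wx->phylink, wx->phydev)
 * ngbe_up()         -> phylink_start()
 * ngbe_down()       -> phylink_stop()
 * ngbe_close()      -> phylink_disconnect_phy()
 * ngbe_remove()     -> phylink_destroy()
 */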
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
index 0a6400dd89c4..f610b771888a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
@@ -7,6 +7,5 @@
#ifndef _NGBE_MDIO_H_
#define _NGBE_MDIO_H_
-int ngbe_phy_connect(struct wx *wx);
int ngbe_mdio_init(struct wx *wx);
#endif /* _NGBE_MDIO_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index ff754d69bdf6..f48ed7fc1805 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -80,7 +80,7 @@
NGBE_PX_MISC_IEN_GPIO)
#define NGBE_INTR_ALL 0x1FF
-#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define NGBE_INTR_MISC BIT(0)
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED 0x14440
@@ -105,6 +105,7 @@
#define NGBE_FW_CMD_ST_FAIL 0x70657376
#define NGBE_MAX_FDIR_INDICES 7
+#define NGBE_MAX_RSS_INDICES 8
#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
@@ -130,4 +131,8 @@
extern char ngbe_driver_name[];
+void ngbe_down(struct wx *wx);
+void ngbe_up(struct wx *wx);
+int ngbe_setup_tc(struct net_device *dev, u8 tc);
+
#endif /* _NGBE_TYPE_H_ */
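With the misc vector now fixed at bit 0 (NGBE_INTR_MISC above) and WX_INTR_Q(i) redefined as BIT(i + 1) in wx_type.h, the combined enable mask can be sketched as follows; example_intr_mask() is an illustrative helper, not part of the patch:

/* Bit 0 carries the misc/other vector, queue vector i sits at bit i + 1;
 * with eight queue vectors this matches NGBE_INTR_ALL (0x1FF).
 */
static u64 example_intr_mask(struct wx *wx)
{
	u64 mask = NGBE_INTR_MISC;	/* BIT(0) */
	int i;

	for (i = 0; i < wx->num_q_vectors; i++)
		mask |= WX_INTR_Q(i);	/* BIT(i + 1) */

	return mask;
}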
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index 3f336a088e43..db675512ce4d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -7,43 +7,93 @@
#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_ethtool.h"
-static int txgbe_nway_reset(struct net_device *netdev)
+static int txgbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
- struct txgbe *txgbe = netdev_to_txgbe(netdev);
+ struct wx *wx = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+ struct wx_ring *temp_ring;
+ int i;
- return phylink_ethtool_nway_reset(txgbe->phylink);
-}
+ new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
-static int txgbe_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct txgbe *txgbe = netdev_to_txgbe(netdev);
+ new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ if (new_tx_count == wx->tx_ring_count &&
+ new_rx_count == wx->rx_ring_count)
+ return 0;
+
+ if (!netif_running(wx->netdev)) {
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx->rx_ring[i]->count = new_rx_count;
+ wx->tx_ring_count = new_tx_count;
+ wx->rx_ring_count = new_rx_count;
+
+ return 0;
+ }
- return phylink_ethtool_ksettings_get(txgbe->phylink, cmd);
+ /* allocate temporary buffer to store rings in */
+ i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
+ temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
+ if (!temp_ring)
+ return -ENOMEM;
+
+ txgbe_down(wx);
+
+ wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
+ kvfree(temp_ring);
+
+ txgbe_up(wx);
+
+ return 0;
}
-static int txgbe_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
+static int txgbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
{
- struct txgbe *txgbe = netdev_to_txgbe(netdev);
+ int err;
+
+ err = wx_set_channels(dev, ch);
+ if (err < 0)
+ return err;
- return phylink_ethtool_ksettings_set(txgbe->phylink, cmd);
+ /* use setup TC to update any traffic class queue mapping */
+ return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
static const struct ethtool_ops txgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_drvinfo = wx_get_drvinfo,
- .nway_reset = txgbe_nway_reset,
+ .nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = txgbe_get_link_ksettings,
- .set_link_ksettings = txgbe_set_link_ksettings,
+ .get_link_ksettings = wx_get_link_ksettings,
+ .set_link_ksettings = wx_set_link_ksettings,
.get_sset_count = wx_get_sset_count,
.get_strings = wx_get_strings,
.get_ethtool_stats = wx_get_ethtool_stats,
.get_eth_mac_stats = wx_get_mac_stats,
.get_pause_stats = wx_get_pause_stats,
+ .get_pauseparam = wx_get_pauseparam,
+ .set_pauseparam = wx_set_pauseparam,
+ .get_ringparam = wx_get_ringparam,
+ .set_ringparam = txgbe_set_ringparam,
+ .get_coalesce = wx_get_coalesce,
+ .set_coalesce = wx_set_coalesce,
+ .get_channels = wx_get_channels,
+ .set_channels = txgbe_set_channels,
+ .get_msglevel = wx_get_msglevel,
+ .set_msglevel = wx_set_msglevel,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 526250102db2..3b151c410a5c 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -86,7 +86,7 @@ static void txgbe_irq_enable(struct wx *wx, bool queues)
wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
/* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
if (queues)
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
@@ -145,7 +145,7 @@ static int txgbe_request_msix_irqs(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_entries[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring)
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -168,7 +168,7 @@ static int txgbe_request_msix_irqs(struct wx *wx)
free_queue_irqs:
while (vector) {
vector--;
- free_irq(wx->msix_entries[vector].vector,
+ free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
wx_reset_interrupt_capability(wx);
@@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx)
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
- struct txgbe *txgbe;
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- txgbe = netdev_to_txgbe(netdev);
- phylink_start(txgbe->phylink);
+ phylink_start(wx->phylink);
/* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0));
@@ -290,18 +288,22 @@ static void txgbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
-static void txgbe_down(struct wx *wx)
+void txgbe_down(struct wx *wx)
{
- struct txgbe *txgbe = netdev_to_txgbe(wx->netdev);
-
txgbe_disable_device(wx);
txgbe_reset(wx);
- phylink_stop(txgbe->phylink);
+ phylink_stop(wx->phylink);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
+void txgbe_up(struct wx *wx)
+{
+ wx_configure(wx);
+ txgbe_up_complete(wx);
+}
+
/**
* txgbe_init_type_code - Initialize the shared code
* @wx: pointer to hardware structure
@@ -376,6 +378,10 @@ static int txgbe_sw_init(struct wx *wx)
wx_err(wx, "Do not support MSI-X\n");
wx->mac.max_msix_vectors = msix_count;
+ wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES,
+ num_online_cpus());
+ wx->rss_enabled = true;
+
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@@ -502,6 +508,41 @@ static void txgbe_shutdown(struct pci_dev *pdev)
}
}
+/**
+ * txgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int txgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ txgbe_close(dev);
+ else
+ txgbe_reset(wx);
+
+ wx_clear_interrupt_scheme(wx);
+
+ if (tc)
+ netdev_set_num_tc(dev, tc);
+ else
+ netdev_reset_tc(dev);
+
+ wx_init_interrupt_scheme(wx);
+
+ if (netif_running(dev))
+ txgbe_open(dev);
+
+ return 0;
+}
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -638,6 +679,7 @@ static int txgbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
@@ -775,6 +817,7 @@ static void txgbe_remove(struct pci_dev *pdev)
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+ kfree(wx->rss_key);
kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index b6c06adb8656..1b84d495d14e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -159,7 +159,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config,
phy_interface_t interface)
{
- struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
+ struct txgbe *txgbe = wx->priv;
if (interface == PHY_INTERFACE_MODE_10GBASER)
return &txgbe->xpcs->pcs;
@@ -175,7 +176,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode,
static void txgbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
- struct wx *wx = netdev_priv(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
}
@@ -186,9 +187,11 @@ static void txgbe_mac_link_up(struct phylink_config *config,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct wx *wx = netdev_priv(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
u32 txcfg, wdg;
+ wx_fc_enable(wx, tx_pause, rx_pause);
+
txcfg = rd32(wx, WX_MAC_TX_CFG);
txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK;
@@ -217,7 +220,7 @@ static void txgbe_mac_link_up(struct phylink_config *config,
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct wx *wx = netdev_priv(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
@@ -228,7 +231,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct wx *wx = netdev_priv(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
txgbe_enable_sec_tx_path(wx);
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
@@ -253,10 +256,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
phy_interface_t phy_mode;
struct phylink *phylink;
- config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
+ config = &wx->phylink_config;
config->dev = &wx->netdev->dev;
config->type = PHYLINK_NETDEV;
config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
@@ -287,7 +287,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
}
}
- txgbe->phylink = phylink;
+ wx->phylink = phylink;
return 0;
}
@@ -483,11 +483,11 @@ static void txgbe_irq_handler(struct irq_desc *desc)
TXGBE_PX_MISC_ETH_AN)) {
u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
- phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
+ phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
}
/* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
}
static int txgbe_gpio_init(struct txgbe *txgbe)
@@ -531,7 +531,12 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector;
+
+ /* now only supported on MSI-X interrupts */
+ if (!wx->msix_entry)
+ return -EPERM;
+
+ girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -701,6 +706,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
int txgbe_init_phy(struct txgbe *txgbe)
{
+ struct wx *wx = txgbe->wx;
int ret;
if (txgbe->wx->media_type == sp_media_copper)
@@ -708,46 +714,48 @@ int txgbe_init_phy(struct txgbe *txgbe)
ret = txgbe_swnodes_register(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to register software nodes\n");
+ wx_err(wx, "failed to register software nodes\n");
return ret;
}
ret = txgbe_mdio_pcs_init(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret);
+ wx_err(wx, "failed to init mdio pcs: %d\n", ret);
goto err_unregister_swnode;
}
ret = txgbe_phylink_init(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to init phylink\n");
+ wx_err(wx, "failed to init phylink\n");
goto err_destroy_xpcs;
}
ret = txgbe_gpio_init(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to init gpio\n");
+ wx_err(wx, "failed to init gpio\n");
goto err_destroy_phylink;
}
ret = txgbe_clock_register(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to register clock: %d\n", ret);
+ wx_err(wx, "failed to register clock: %d\n", ret);
goto err_destroy_phylink;
}
ret = txgbe_i2c_register(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret);
+ wx_err(wx, "failed to init i2c interface: %d\n", ret);
goto err_unregister_clk;
}
ret = txgbe_sfp_register(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to register sfp\n");
+ wx_err(wx, "failed to register sfp\n");
goto err_unregister_i2c;
}
+ wx->msix_in_use = true;
+
return 0;
err_unregister_i2c:
@@ -756,7 +764,7 @@ err_unregister_clk:
clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk);
err_destroy_phylink:
- phylink_destroy(txgbe->phylink);
+ phylink_destroy(wx->phylink);
err_destroy_xpcs:
xpcs_destroy(txgbe->xpcs);
err_unregister_swnode:
@@ -768,8 +776,8 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
if (txgbe->wx->media_type == sp_media_copper) {
- phylink_disconnect_phy(txgbe->phylink);
- phylink_destroy(txgbe->phylink);
+ phylink_disconnect_phy(txgbe->wx->phylink);
+ phylink_destroy(txgbe->wx->phylink);
return;
}
@@ -777,7 +785,8 @@ void txgbe_remove_phy(struct txgbe *txgbe)
platform_device_unregister(txgbe->i2c_dev);
clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk);
- phylink_destroy(txgbe->phylink);
+ phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group);
+ txgbe->wx->msix_in_use = false;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 3ba9ce43f394..270a6fd9ad0b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -98,6 +98,7 @@
#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
+#define TXGBE_MAX_RSS_INDICES 63
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
@@ -122,19 +123,16 @@
#define TXGBE_DEFAULT_RX_WORK 128
#endif
-#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
-#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
+#define TXGBE_INTR_MISC BIT(0)
+#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1)
#define TXGBE_MAX_EITR GENMASK(11, 3)
extern char txgbe_driver_name[];
-static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev)
-{
- struct wx *wx = netdev_priv(netdev);
-
- return wx->priv;
-}
+void txgbe_down(struct wx *wx);
+void txgbe_up(struct wx *wx);
+int txgbe_setup_tc(struct net_device *dev, u8 tc);
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
@@ -175,7 +173,6 @@ struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
struct dw_xpcs *xpcs;
- struct phylink *phylink;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 0014729b8865..35d96c633a33 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -26,6 +26,7 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
depends on HAS_IOMEM
+ depends on XILINX_DMA
select PHYLINK
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 575ff9de8985..807ead678551 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
+#include <linux/skbuff.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
@@ -379,6 +380,22 @@ struct axidma_bd {
#define XAE_NUM_MISC_CLOCKS 3
/**
+ * struct skbuf_dma_descriptor - skb for each dma descriptor
+ * @sgl: Scatterlist entries for the skb fragments.
+ * @desc: Pointer to the dma descriptor.
+ * @dma_address: dma address of the sglist.
+ * @skb: Pointer to the SKB transferred using DMA.
+ * @sg_len: number of entries in the sglist.
+ */
+struct skbuf_dma_descriptor {
+ struct scatterlist sgl[MAX_SKB_FRAGS + 1];
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t dma_address;
+ struct sk_buff *skb;
+ int sg_len;
+};
+
+/**
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
@@ -435,6 +452,15 @@ struct axidma_bd {
* @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
* @coalesce_usec_tx: IRQ coalesce delay for TX
+ * @use_dmaengine: flag to check dmaengine framework usage.
+ * @tx_chan: TX DMA channel.
+ * @rx_chan: RX DMA channel.
+ * @tx_skb_ring: Pointer to TX skb ring buffer array.
+ * @rx_skb_ring: Pointer to RX skb ring buffer array.
+ * @tx_ring_head: TX skb ring buffer head index.
+ * @tx_ring_tail: TX skb ring buffer tail index.
+ * @rx_ring_head: RX skb ring buffer head index.
+ * @rx_ring_tail: RX skb ring buffer tail index.
*/
struct axienet_local {
struct net_device *ndev;
@@ -499,6 +525,15 @@ struct axienet_local {
u32 coalesce_usec_rx;
u32 coalesce_count_tx;
u32 coalesce_usec_tx;
+ u8 use_dmaengine;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ struct skbuf_dma_descriptor **tx_skb_ring;
+ struct skbuf_dma_descriptor **rx_skb_ring;
+ int tx_ring_head;
+ int tx_ring_tail;
+ int rx_ring_head;
+ int rx_ring_tail;
};
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index bf6e33990490..aaf780fd4f5e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -38,6 +38,11 @@
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/circ_buf.h>
+#include <net/netdev_queues.h>
#include "xilinx_axienet.h"
@@ -47,6 +52,9 @@
#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
+#define DMA_NUM_APP_WORDS 5
+#define LEN_APP 4
+#define RX_BUF_NUM_DEFAULT 128
/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME "xaxienet"
@@ -55,6 +63,8 @@
#define AXIENET_REGS_N 40
+static void axienet_rx_submit_desc(struct net_device *ndev);
+
/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
{ .compatible = "xlnx,axi-ethernet-1.00.a", },
@@ -120,6 +130,16 @@ static struct axienet_option axienet_options[] = {
{}
};
+static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
+{
+ return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
+}
+
+static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
+{
+ return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
+}
+
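These helpers treat tx_ring_head/tail and rx_ring_head/tail as free-running counters: masking with size - 1 is valid because TX_BD_NUM_MAX (4096) and RX_BUF_NUM_DEFAULT (128) are powers of two, and the unmasked values are exactly what CIRC_SPACE() expects. A minimal sketch of the TX space check used further down:

/* Illustrative only: true while more than 'frags' slots are free. */
static bool example_tx_has_room(struct axienet_local *lp, int frags)
{
	return CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail,
			  TX_BD_NUM_MAX) > frags;
}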
/**
* axienet_dma_in32 - Memory mapped Axi DMA register read
* @lp: Pointer to axienet local structure
@@ -589,10 +609,6 @@ static int axienet_device_reset(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
int ret;
- ret = __axienet_device_reset(lp);
- if (ret)
- return ret;
-
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
@@ -606,11 +622,17 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_JUMBO;
}
- ret = axienet_dma_bd_init(ndev);
- if (ret) {
- netdev_err(ndev, "%s: descriptor allocation failed\n",
- __func__);
- return ret;
+ if (!lp->use_dmaengine) {
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ return ret;
+
+ ret = axienet_dma_bd_init(ndev);
+ if (ret) {
+ netdev_err(ndev, "%s: descriptor allocation failed\n",
+ __func__);
+ return ret;
+ }
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -726,6 +748,128 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
}
/**
+ * axienet_dma_tx_cb - DMA engine callback for TX channel.
+ * @data: Pointer to the axienet_local structure.
+ * @result: error reporting through dmaengine_result.
+ *
+ * This function is called by the dmaengine driver for the TX channel
+ * to notify that the transmit is done.
+ */
+static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
+{
+ struct skbuf_dma_descriptor *skbuf_dma;
+ struct axienet_local *lp = data;
+ struct netdev_queue *txq;
+ int len;
+
+ skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
+ len = skbuf_dma->skb->len;
+ txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_bytes, len);
+ u64_stats_add(&lp->tx_packets, 1);
+ u64_stats_update_end(&lp->tx_stat_sync);
+ dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
+ dev_consume_skb_any(skbuf_dma->skb);
+ netif_txq_completed_wake(txq, 1, len,
+ CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
+ 2 * MAX_SKB_FRAGS);
+}
+
+/**
+ * axienet_start_xmit_dmaengine - Starts the transmission.
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK on success, or on any error other than lack of
+ * ring space (the skb is dropped in that case). NETDEV_TX_BUSY when no
+ * free element is available in the TX skb ring buffer.
+ *
+ * This function is invoked to initiate transmission. It maps the skb
+ * into a scatterlist, registers the DMA completion callback and
+ * submits the DMA transaction.
+ * Additionally, if checksum offloading is supported, it populates the
+ * AXI Stream Control fields with appropriate values.
+ */
+static netdev_tx_t
+axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct dma_async_tx_descriptor *dma_tx_desc = NULL;
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
+ struct skbuf_dma_descriptor *skbuf_dma;
+ struct dma_device *dma_dev;
+ struct netdev_queue *txq;
+ u32 csum_start_off;
+ u32 csum_index_off;
+ int sg_len;
+ int ret;
+
+ dma_dev = lp->tx_chan->device;
+ sg_len = skb_shinfo(skb)->nr_frags + 1;
+ if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
+ netif_stop_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
+ if (!skbuf_dma)
+ goto xmit_error_drop_skb;
+
+ lp->tx_ring_head++;
+ sg_init_table(skbuf_dma->sgl, sg_len);
+ ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
+ if (ret < 0)
+ goto xmit_error_drop_skb;
+
+ ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
+ if (!ret)
+ goto xmit_error_drop_skb;
+
+ /* Fill up app fields for checksum */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+ /* Tx Full Checksum Offload Enabled */
+ app_metadata[0] |= 2;
+ } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
+ csum_start_off = skb_transport_offset(skb);
+ csum_index_off = csum_start_off + skb->csum_offset;
+ /* Tx Partial Checksum Offload Enabled */
+ app_metadata[0] |= 1;
+ app_metadata[1] = (csum_start_off << 16) | csum_index_off;
+ }
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
+ }
+
+ dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
+ sg_len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT, (void *)app_metadata);
+ if (!dma_tx_desc)
+ goto xmit_error_unmap_sg;
+
+ skbuf_dma->skb = skb;
+ skbuf_dma->sg_len = sg_len;
+ dma_tx_desc->callback_param = lp;
+ dma_tx_desc->callback_result = axienet_dma_tx_cb;
+ dmaengine_submit(dma_tx_desc);
+ dma_async_issue_pending(lp->tx_chan);
+ txq = skb_get_tx_queue(lp->ndev, skb);
+ netdev_tx_sent_queue(txq, skb->len);
+ netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
+ MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
+
+ return NETDEV_TX_OK;
+
+xmit_error_unmap_sg:
+ dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
+xmit_error_drop_skb:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
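To make the checksum-offload packing in axienet_start_xmit_dmaengine() concrete, here is a worked example under the assumption of TCP over IPv4 behind a plain 14-byte Ethernet header, so skb_transport_offset() is 34 and skb->csum_offset is 16:

/* Partial checksum offload (XAE_FEATURE_PARTIAL_TX_CSUM):
 *   app_metadata[0] |= 1;
 *   csum_start_off  = 34                 start of the TCP header
 *   csum_index_off  = 34 + 16 = 50       where the checksum is stored
 *   app_metadata[1] = (34 << 16) | 50 = 0x00220032
 */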
+/**
* axienet_tx_poll - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @napi: Pointer to NAPI structure.
@@ -892,6 +1036,42 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/**
+ * axienet_dma_rx_cb - DMA engine callback for RX channel.
+ * @data: Pointer to the axienet_local structure.
+ * @result: error reporting through dmaengine_result.
+ *
+ * This function is called by the dmaengine driver for the RX channel
+ * to notify that a packet has been received.
+ */
+static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+{
+ struct skbuf_dma_descriptor *skbuf_dma;
+ size_t meta_len, meta_max_len, rx_len;
+ struct axienet_local *lp = data;
+ struct sk_buff *skb;
+ u32 *app_metadata;
+
+ skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
+ skb = skbuf_dma->skb;
+ app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
+ &meta_max_len);
+ dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ /* TODO: Derive app word index programmatically */
+ rx_len = (app_metadata[LEN_APP] & 0xFFFF);
+ skb_put(skb, rx_len);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ __netif_rx(skb);
+ u64_stats_update_begin(&lp->rx_stat_sync);
+ u64_stats_add(&lp->rx_packets, 1);
+ u64_stats_add(&lp->rx_bytes, rx_len);
+ u64_stats_update_end(&lp->rx_stat_sync);
+ axienet_rx_submit_desc(lp->ndev);
+ dma_async_issue_pending(lp->rx_chan);
+}
+
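The RX completion above pulls the frame length out of the low 16 bits of AXI-Stream app word LEN_APP (index 4); for instance, a full-sized non-jumbo frame of 1514 bytes would decode as:

/* rx_len = app_metadata[LEN_APP] & 0xFFFF;
 * e.g. app_metadata[4] & 0xFFFF == 1514
 */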
+/**
* axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
* @napi: Pointer to NAPI structure.
* @budget: Max number of RX packets to process.
@@ -1125,40 +1305,158 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
static void axienet_dma_err_handler(struct work_struct *work);
/**
- * axienet_open - Driver open routine.
- * @ndev: Pointer to net_device structure
+ * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
+ * @ndev: net_device pointer
+ *
+ * Allocate an skbuff, map it for DMA, obtain a dma descriptor, attach
+ * the callback information and submit the descriptor.
+ */
+static void axienet_rx_submit_desc(struct net_device *ndev)
+{
+ struct dma_async_tx_descriptor *dma_rx_desc = NULL;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct skbuf_dma_descriptor *skbuf_dma;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+
+ skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
+ if (!skbuf_dma)
+ return;
+
+ lp->rx_ring_head++;
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ return;
+
+ sg_init_table(skbuf_dma->sgl, 1);
+ addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(lp->dev, addr))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "DMA mapping error\n");
+ goto rx_submit_err_free_skb;
+ }
+ sg_dma_address(skbuf_dma->sgl) = addr;
+ sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
+ dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!dma_rx_desc)
+ goto rx_submit_err_unmap_skb;
+
+ skbuf_dma->skb = skb;
+ skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
+ skbuf_dma->desc = dma_rx_desc;
+ dma_rx_desc->callback_param = lp;
+ dma_rx_desc->callback_result = axienet_dma_rx_cb;
+ dmaengine_submit(dma_rx_desc);
+
+ return;
+
+rx_submit_err_unmap_skb:
+ dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
+rx_submit_err_free_skb:
+ dev_kfree_skb(skb);
+}
+
+/**
+ * axienet_init_dmaengine - init the dmaengine code.
+ * @ndev: Pointer to net_device structure
*
* Return: 0, on success.
- * non-zero error value on failure
+ * non-zero error value on failure
*
- * This is the driver open routine. It calls phylink_start to start the
- * PHY device.
- * It also allocates interrupt service routines, enables the interrupt lines
- * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
- * descriptors are initialized.
+ * This is the dmaengine initialization code.
*/
-static int axienet_open(struct net_device *ndev)
+static int axienet_init_dmaengine(struct net_device *ndev)
{
- int ret;
struct axienet_local *lp = netdev_priv(ndev);
+ struct skbuf_dma_descriptor *skbuf_dma;
+ int i, ret;
- dev_dbg(&ndev->dev, "axienet_open()\n");
+ lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
+ if (IS_ERR(lp->tx_chan)) {
+ dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
+ return PTR_ERR(lp->tx_chan);
+ }
- /* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- axienet_lock_mii(lp);
- ret = axienet_device_reset(ndev);
- axienet_unlock_mii(lp);
+ lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
+ if (IS_ERR(lp->rx_chan)) {
+ ret = PTR_ERR(lp->rx_chan);
+ dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
+ goto err_dma_release_tx;
+ }
- ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
- if (ret) {
- dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
- return ret;
+ lp->tx_ring_tail = 0;
+ lp->tx_ring_head = 0;
+ lp->rx_ring_tail = 0;
+ lp->rx_ring_head = 0;
+ lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
+ GFP_KERNEL);
+ if (!lp->tx_skb_ring) {
+ ret = -ENOMEM;
+ goto err_dma_release_rx;
+ }
+ for (i = 0; i < TX_BD_NUM_MAX; i++) {
+ skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
+ if (!skbuf_dma) {
+ ret = -ENOMEM;
+ goto err_free_tx_skb_ring;
+ }
+ lp->tx_skb_ring[i] = skbuf_dma;
}
- phylink_start(lp->phylink);
+ lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
+ GFP_KERNEL);
+ if (!lp->rx_skb_ring) {
+ ret = -ENOMEM;
+ goto err_free_tx_skb_ring;
+ }
+ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
+ skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
+ if (!skbuf_dma) {
+ ret = -ENOMEM;
+ goto err_free_rx_skb_ring;
+ }
+ lp->rx_skb_ring[i] = skbuf_dma;
+ }
+ /* TODO: Instead of BD_NUM_DEFAULT use runtime support */
+ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+ axienet_rx_submit_desc(ndev);
+ dma_async_issue_pending(lp->rx_chan);
+
+ return 0;
+
+err_free_rx_skb_ring:
+ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+ kfree(lp->rx_skb_ring[i]);
+ kfree(lp->rx_skb_ring);
+err_free_tx_skb_ring:
+ for (i = 0; i < TX_BD_NUM_MAX; i++)
+ kfree(lp->tx_skb_ring[i]);
+ kfree(lp->tx_skb_ring);
+err_dma_release_rx:
+ dma_release_channel(lp->rx_chan);
+err_dma_release_tx:
+ dma_release_channel(lp->tx_chan);
+ return ret;
+}
+
+/**
+ * axienet_init_legacy_dma - init the dma legacy code.
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ * non-zero error value on failure
+ *
+ * This is the legacy DMA initialization code. It registers the
+ * interrupt service routines and enables the interrupt lines and ISR
+ * handling.
+ *
+ */
+static int axienet_init_legacy_dma(struct net_device *ndev)
+{
+ int ret;
+ struct axienet_local *lp = netdev_priv(ndev);
/* Enable worker thread for Axi DMA error handling */
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
@@ -1193,14 +1491,77 @@ err_rx_irq:
err_tx_irq:
napi_disable(&lp->napi_tx);
napi_disable(&lp->napi_rx);
- phylink_stop(lp->phylink);
- phylink_disconnect_phy(lp->phylink);
cancel_work_sync(&lp->dma_err_task);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
/**
+ * axienet_open - Driver open routine.
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ * non-zero error value on failure
+ *
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
+ * It also allocates interrupt service routines, enables the interrupt lines
+ * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
+ * descriptors are initialized.
+ */
+static int axienet_open(struct net_device *ndev)
+{
+ int ret;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ dev_dbg(&ndev->dev, "%s\n", __func__);
+
+ /* When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+ */
+ axienet_lock_mii(lp);
+ ret = axienet_device_reset(ndev);
+ axienet_unlock_mii(lp);
+
+ ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+ if (ret) {
+ dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+ return ret;
+ }
+
+ phylink_start(lp->phylink);
+
+ if (lp->use_dmaengine) {
+ /* Enable interrupts for Axi Ethernet core (if defined) */
+ if (lp->eth_irq > 0) {
+ ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret)
+ goto err_phy;
+ }
+
+ ret = axienet_init_dmaengine(ndev);
+ if (ret < 0)
+ goto err_free_eth_irq;
+ } else {
+ ret = axienet_init_legacy_dma(ndev);
+ if (ret)
+ goto err_phy;
+ }
+
+ return 0;
+
+err_free_eth_irq:
+ if (lp->eth_irq > 0)
+ free_irq(lp->eth_irq, ndev);
+err_phy:
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+ return ret;
+}
+
+/**
* axienet_stop - Driver stop routine.
* @ndev: Pointer to net_device structure
*
@@ -1213,11 +1574,14 @@ err_tx_irq:
static int axienet_stop(struct net_device *ndev)
{
struct axienet_local *lp = netdev_priv(ndev);
+ int i;
dev_dbg(&ndev->dev, "axienet_close()\n");
- napi_disable(&lp->napi_tx);
- napi_disable(&lp->napi_rx);
+ if (!lp->use_dmaengine) {
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+ }
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1225,18 +1589,33 @@ static int axienet_stop(struct net_device *ndev)
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- axienet_dma_stop(lp);
+ if (!lp->use_dmaengine) {
+ axienet_dma_stop(lp);
+ cancel_work_sync(&lp->dma_err_task);
+ free_irq(lp->tx_irq, ndev);
+ free_irq(lp->rx_irq, ndev);
+ axienet_dma_bd_release(ndev);
+ } else {
+ dmaengine_terminate_sync(lp->tx_chan);
+ dmaengine_synchronize(lp->tx_chan);
+ dmaengine_terminate_sync(lp->rx_chan);
+ dmaengine_synchronize(lp->rx_chan);
+
+ for (i = 0; i < TX_BD_NUM_MAX; i++)
+ kfree(lp->tx_skb_ring[i]);
+ kfree(lp->tx_skb_ring);
+ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+ kfree(lp->rx_skb_ring[i]);
+ kfree(lp->rx_skb_ring);
+
+ dma_release_channel(lp->rx_chan);
+ dma_release_channel(lp->tx_chan);
+ }
axienet_iow(lp, XAE_IE_OFFSET, 0);
- cancel_work_sync(&lp->dma_err_task);
-
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
- free_irq(lp->tx_irq, ndev);
- free_irq(lp->rx_irq, ndev);
-
- axienet_dma_bd_release(ndev);
return 0;
}
@@ -1333,6 +1712,18 @@ static const struct net_device_ops axienet_netdev_ops = {
#endif
};
+static const struct net_device_ops axienet_netdev_dmaengine_ops = {
+ .ndo_open = axienet_open,
+ .ndo_stop = axienet_stop,
+ .ndo_start_xmit = axienet_start_xmit_dmaengine,
+ .ndo_get_stats64 = axienet_get_stats64,
+ .ndo_change_mtu = axienet_change_mtu,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = axienet_ioctl,
+ .ndo_set_rx_mode = axienet_set_multicast_list,
+};
+
/**
* axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
* @ndev: Pointer to net_device structure
@@ -1412,14 +1803,16 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
- data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
- data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
- data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
- data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+ if (!lp->use_dmaengine) {
+ data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+ data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+ data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+ data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+ }
}
static void
@@ -1863,7 +2256,6 @@ static int axienet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
- ndev->netdev_ops = &axienet_netdev_ops;
ndev->ethtool_ops = &axienet_ethtool_ops;
/* MTU range: 64 - 9000 */
@@ -1880,9 +2272,6 @@ static int axienet_probe(struct platform_device *pdev)
u64_stats_init(&lp->rx_stat_sync);
u64_stats_init(&lp->tx_stat_sync);
- netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
- netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
-
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2008,82 +2397,118 @@ static int axienet_probe(struct platform_device *pdev)
goto cleanup_clk;
}
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (np) {
- struct resource dmares;
+ if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- ret = of_address_to_resource(np, 0, &dmares);
- if (ret) {
- dev_err(&pdev->dev,
- "unable to get DMA resource\n");
+ if (np) {
+ struct resource dmares;
+
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get DMA resource\n");
+ of_node_put(np);
+ goto cleanup_clk;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ lp->rx_irq = irq_of_parse_and_map(np, 1);
+ lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
+ } else {
+ /* Check for these resources directly on the Ethernet node. */
+ lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ lp->rx_irq = platform_get_irq(pdev, 1);
+ lp->tx_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq_optional(pdev, 2);
+ }
+ if (IS_ERR(lp->dma_regs)) {
+ dev_err(&pdev->dev, "could not map DMA regs\n");
+ ret = PTR_ERR(lp->dma_regs);
+ goto cleanup_clk;
+ }
+ if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
+ dev_err(&pdev->dev, "could not determine irqs\n");
+ ret = -ENOMEM;
goto cleanup_clk;
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev,
- &dmares);
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
- lp->eth_irq = platform_get_irq_optional(pdev, 0);
- } else {
- /* Check for these resources directly on the Ethernet node. */
- lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
- lp->rx_irq = platform_get_irq(pdev, 1);
- lp->tx_irq = platform_get_irq(pdev, 0);
- lp->eth_irq = platform_get_irq_optional(pdev, 2);
- }
- if (IS_ERR(lp->dma_regs)) {
- dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- goto cleanup_clk;
- }
- if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto cleanup_clk;
- }
- /* Reset core now that clocks are enabled, prior to accessing MDIO */
- ret = __axienet_device_reset(lp);
- if (ret)
- goto cleanup_clk;
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
+
+ /* Autodetect the need for 64-bit DMA pointers.
+ * When the IP is configured for a bus width bigger than 32 bits,
+ * writing the MSB registers is mandatory, even if they are all 0.
+ * We can detect this case by writing all 1's to one such register
+ * and see if that sticks: when the IP is configured for 32 bits
+ * only, those registers are RES0.
+ * Those MSB registers were introduced in IP v7.1, which we check first.
+ */
+ if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+ void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
- /* Autodetect the need for 64-bit DMA pointers.
- * When the IP is configured for a bus width bigger than 32 bits,
- * writing the MSB registers is mandatory, even if they are all 0.
- * We can detect this case by writing all 1's to one such register
- * and see if that sticks: when the IP is configured for 32 bits
- * only, those registers are RES0.
- * Those MSB registers were introduced in IP v7.1, which we check first.
- */
- if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
- void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
-
- iowrite32(0x0, desc);
- if (ioread32(desc) == 0) { /* sanity check */
- iowrite32(0xffffffff, desc);
- if (ioread32(desc) > 0) {
- lp->features |= XAE_FEATURE_DMA_64BIT;
- addr_width = 64;
- dev_info(&pdev->dev,
- "autodetected 64-bit DMA range\n");
- }
iowrite32(0x0, desc);
+ if (ioread32(desc) == 0) { /* sanity check */
+ iowrite32(0xffffffff, desc);
+ if (ioread32(desc) > 0) {
+ lp->features |= XAE_FEATURE_DMA_64BIT;
+ addr_width = 64;
+ dev_info(&pdev->dev,
+ "autodetected 64-bit DMA range\n");
+ }
+ iowrite32(0x0, desc);
+ }
+ }
+ if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
+ ret = -EINVAL;
+ goto cleanup_clk;
}
- }
- if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
- dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
- ret = -EINVAL;
- goto cleanup_clk;
- }
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
- if (ret) {
- dev_err(&pdev->dev, "No suitable DMA available\n");
- goto cleanup_clk;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto cleanup_clk;
+ }
+ netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+ netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
+ } else {
+ struct xilinx_vdma_config cfg;
+ struct dma_chan *tx_chan;
+
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
+ if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
+ ret = lp->eth_irq;
+ goto cleanup_clk;
+ }
+ tx_chan = dma_request_chan(lp->dev, "tx_chan0");
+ if (IS_ERR(tx_chan)) {
+ ret = PTR_ERR(tx_chan);
+ dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
+ goto cleanup_clk;
+ }
+
+ cfg.reset = 1;
+ /* Despite the VDMA name, this config call also supports resetting DMA channels */
+ ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Reset channel failed\n");
+ dma_release_channel(tx_chan);
+ goto cleanup_clk;
+ }
+
+ dma_release_channel(tx_chan);
+ lp->use_dmaengine = 1;
}
+ if (lp->use_dmaengine)
+ ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
+ else
+ ndev->netdev_ops = &axienet_netdev_ops;
/* Check for Ethernet core IRQ (optional) */
if (lp->eth_irq <= 0)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
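For context, the MSB-register probe in the hunk above can be read in isolation. The sketch below restates the detection idea with a hypothetical helper name (axienet_detect_64bit_dma() is not part of the driver); it assumes only that the upper half of the current-descriptor register is RES0 when the IP is built for 32-bit addressing, as the comment in the patch states.

/* Illustrative sketch only: hypothetical helper mirroring the probe logic
 * above. 'dma_regs' is the ioremapped AXI DMA register window, and
 * XAXIDMA_TX_CDESC_OFFSET + 4 addresses the MSB half of the TX current
 * descriptor pointer (present from IP v7.1 onward).
 */
static bool axienet_detect_64bit_dma(void __iomem *dma_regs)
{
	void __iomem *msb = dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
	bool has_64bit = false;

	iowrite32(0x0, msb);
	if (ioread32(msb) == 0) {		/* sanity check: readback works */
		iowrite32(0xffffffff, msb);
		if (ioread32(msb) > 0)		/* the 1s stick: MSB half is implemented */
			has_64bit = true;
		iowrite32(0x0, msb);		/* restore the register */
	}
	return has_64bit;
}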
@@ -2099,8 +2524,8 @@ static int axienet_probe(struct platform_device *pdev)
}
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
- lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
ret = axienet_mdio_setup(lp);
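As a usage illustration of the dmaengine branch above, the fragment below shows the request-and-reset handshake on its own. It is a minimal sketch, not driver code: the "tx_chan0" channel name and the reset via xilinx_vdma_channel_set_config() are taken from the hunk above, while the surrounding helper function is hypothetical.

#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dma.h>

/* Minimal sketch: request the named AXI DMA TX channel, reset it through the
 * Xilinx-specific config call, then release it again. The probe above only
 * uses the channel at this point to verify that the dmaengine path is usable.
 */
static int axienet_check_dmaengine_tx(struct device *dev)
{
	struct xilinx_vdma_config cfg = { .reset = 1 };
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "tx_chan0");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan),
				     "No Ethernet DMA (TX) channel found\n");

	ret = xilinx_vdma_channel_set_config(chan, &cfg);
	if (ret < 0)
		dev_err(dev, "Reset channel failed\n");

	dma_release_channel(chan);
	return ret;
}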