Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c          |  4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h          |  2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h       | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c          |  5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h          |  3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c      | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c   |  6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 46
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 43
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c  |  5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c     | 43
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h    |  2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c   | 22
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c     |  8
14 files changed, 143 insertions, 73 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 0a1f54de40ce..7366d25f86d0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -648,7 +648,7 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
- cfg | (DEFAULT_PAUSE_TIME - 0x1000));
+ cfg | (DEFAULT_PAUSE_TIME / 2));
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -657,7 +657,7 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
- cfg | (DEFAULT_PAUSE_TIME - 0x1000));
+ cfg | (DEFAULT_PAUSE_TIME / 2));
} else {
/* ALL pause frames received are completely ignored */
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 4daba85f836a..4445e23b688b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -83,7 +83,7 @@
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
-#define DEFAULT_PAUSE_TIME 0xFFFF
+#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 4f8893ade710..9980598fe79e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -43,7 +43,7 @@ struct qmem {
void *base;
dma_addr_t iova;
int alloc_sz;
- u8 entry_sz;
+ u16 entry_sz;
u8 align;
u32 qsize;
};
@@ -143,8 +143,13 @@ enum nix_scheduler {
NIX_TXSCH_LVL_CNT = 0x5,
};
-#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
-#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
+#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define MAX_SCHED_WEIGHT 0xFF
+#define DFLT_RR_WEIGHT 71
+#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
+ / MAX_SCHED_WEIGHT)
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
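With the values above, the new default DWRR quantum works out to an exact integer: DFLT_RR_QTM = 71 * 16777215 / 255 = 4671303, i.e. a weight of 71 out of 255 (roughly 28%) of the 24-bit quantum maximum. A minimal standalone sketch to check the arithmetic (plain C, not part of the patch; the intermediate product fits in a 32-bit int):

#include <stdio.h>

#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)	/* 16777215 */
#define MAX_SCHED_WEIGHT 0xFF			/* 255 */
#define DFLT_RR_WEIGHT   71
#define DFLT_RR_QTM      ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) / MAX_SCHED_WEIGHT)

int main(void)
{
	/* 71 * 16777215 = 1191182265, well within INT_MAX; prints 4671303 */
	printf("DFLT_RR_QTM = %d\n", DFLT_RR_QTM);
	return 0;
}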
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3868898757f6..1f877d2258d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -707,6 +707,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
/* Assign MAC address to VFs*/
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
@@ -721,6 +722,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
}
}
}
@@ -1106,6 +1108,9 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
+ if (blkaddr == BLKADDR_NIX0)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
block = &hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5e90047b9a62..62e881254b8b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -266,7 +266,9 @@ struct rvu_pfvf {
u16 minlen;
bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
+ bool pf_set_vfs_mac;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
@@ -615,6 +617,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
int rvu_nix_register_interrupts(struct rvu *rvu);
void rvu_nix_unregister_interrupts(struct rvu *rvu);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index bd125d2f1b38..55304fa55643 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2910,10 +2910,11 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- /* Skip updating mac addr if request is from vf */
- if (!from_vf)
- ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ /* VF can't overwrite admin(PF) changes */
+ if (from_vf && pfvf->pf_set_vfs_mac)
+ return -EPERM;
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
@@ -3971,3 +3972,12 @@ struct rvu *rvu, struct nix_inline_ipsec_lf_cfg *req, struct msg_rsp *rsp)
return 0;
}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c587b8dab336..212bfca5cf33 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -979,8 +979,10 @@ update_rule:
pfvf->def_rule = rule;
/* VF's MAC address is being changed via PF */
- if (pf_set_vfs_mac)
- ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ if (pf_set_vfs_mac) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ pfvf->pf_set_vfs_mac = true;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index faf3365e2a11..ab6bd745b881 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -126,7 +126,7 @@ void otx2_get_stats64(struct net_device *netdev,
EXPORT_SYMBOL(otx2_get_stats64);
/* Sync MAC address with RVU */
-int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, struct net_device *netdev)
+int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
struct nix_set_mac_addr *req;
int err;
@@ -138,7 +138,7 @@ int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, struct net_device *netdev)
return -ENOMEM;
}
- ether_addr_copy(req->mac_addr, netdev->dev_addr);
+ ether_addr_copy(req->mac_addr, mac);
err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox);
@@ -186,9 +186,14 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- otx2_hw_set_mac_addr(pfvf, netdev);
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ /* update dmac field in vlan offload rule */
+ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_install_rxvlan_offload_flow(pfvf);
+ } else {
+ return -EPERM;
+ }
return 0;
}
@@ -364,10 +369,10 @@ dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
pool->page_offset = 0;
ret:
- iova = (u64)dma_map_page_attrs(pfvf->dev, pool->page,
- pool->page_offset, pool->rbsize,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
+ pool->rbsize, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (!iova) {
if (!pool->page_offset)
__free_pages(pool->page, 0);
pool->page = NULL;
@@ -450,15 +455,21 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
/* Set DWRR quantum */
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
- req->regval[2] = pfvf->netdev->mtu;
+ req->regval[2] = DFLT_RR_QTM;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -466,7 +477,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24;
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
@@ -518,7 +529,7 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
int otx2_txschq_stop(struct otx2_nic *pfvf)
{
struct nix_txsch_free_req *free_req;
- int lvl, schq;
+ int lvl, schq, err;
otx2_mbox_lock(&pfvf->mbox);
/* Free the transmit schedulers */
@@ -529,7 +540,7 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
}
free_req->flags = TXSCHQ_FREE_ALL;
- WARN_ON(otx2_sync_mbox_msg(&pfvf->mbox));
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox);
/* Clear the txschq list */
@@ -537,7 +548,7 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
pfvf->hw.txschq_list[lvl][schq] = 0;
}
- return 0;
+ return err;
}
/* RED and drop levels of CQ on packet reception.
@@ -649,7 +660,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = OTX2_MAX_MTU;
+ aq->sq.smq_rr_quantum = DFLT_RR_QTM;
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
@@ -1299,7 +1310,10 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
req->ctype = type;
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
+ __func__);
+
otx2_mbox_unlock(mbox);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index f5c1e792f26e..b88763010b85 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -14,6 +14,7 @@
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/iommu.h>
#include <mbox.h>
#include "otx2_reg.h"
@@ -145,7 +146,7 @@ struct mbox {
struct work_struct mbox_up_wrk;
struct otx2_nic *pfvf;
void *bbuf_base; /* Bounce buffer for mbox memory */
- atomic_t lock; /* serialize mailbox access */
+ struct mutex lock; /* serialize mailbox access */
int num_msgs; /*mbox number of messages*/
int up_num_msgs;/* mbox_up number of messages*/
};
@@ -236,6 +237,7 @@ struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
void *iommu_domain;
+ u16 iommu_domain_type;
u16 xtra_hdr;
u16 max_frs;
@@ -427,18 +429,17 @@ static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
static inline void otx2_mbox_lock_init(struct mbox *mbox)
{
- atomic_set(&mbox->lock, 0);
+ mutex_init(&mbox->lock);
}
static inline void otx2_mbox_lock(struct mbox *mbox)
{
- while (!(atomic_add_return(1, &mbox->lock) == 1))
- cpu_relax();
+ mutex_lock(&mbox->lock);
}
static inline void otx2_mbox_unlock(struct mbox *mbox)
{
- atomic_set(&mbox->lock, 0);
+ mutex_unlock(&mbox->lock);
}
/* With the absence of API for 128-bit IO memory access for arm64,
@@ -631,11 +632,40 @@ static inline int rvu_get_pf(u16 pcifunc)
return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
+static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
+ struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t iova;
+
+ if (pfvf->iommu_domain_type == IOMMU_DOMAIN_IDENTITY)
+ return page_to_phys(page) + offset;
+
+ iova = dma_map_page_attrs(pfvf->dev, page,
+ offset, size, dir, attrs);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova)))
+ return (dma_addr_t)NULL;
+ return iova;
+}
+
+static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (pfvf->iommu_domain_type == IOMMU_DOMAIN_IDENTITY)
+ return;
+
+ dma_unmap_page_attrs(pfvf->dev, addr, size, dir, attrs);
+}
+
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
-int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, struct net_device *netdev);
+int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev);
@@ -724,6 +754,7 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
int otx2smqvf_probe(struct otx2_nic *vf);
int otx2smqvf_remove(struct otx2_nic *vf);
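The otx2_dma_map_page()/otx2_dma_unmap_page() helpers declared above become the common DMA-mapping path for Rx/Tx buffers (see the otx2_common.c and otx2_txrx.c hunks below); with an IOMMU identity (pass-through) domain they bypass dma_map_page_attrs() and use page_to_phys() directly, and the unmap side becomes a no-op. A minimal caller sketch, assuming pfvf, page and len as in those call sites (the error handling here is illustrative, not taken from the patch):

	dma_addr_t iova;

	iova = otx2_dma_map_page(pfvf, page, 0, len,
				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (!iova)	/* the helper returns 0 on a mapping failure */
		return -ENOMEM;

	/* ... hand iova to the hardware ... */

	otx2_dma_unmap_page(pfvf, iova, len,
			    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);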
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 96a4dbba25a3..39f439202235 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -506,11 +506,14 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
return 0;
}
-static int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
struct npc_install_flow_req *req;
int err;
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return -ENOMEM;
+
otx2_mbox_lock(&pfvf->mbox);
req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
if (!req) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index ea37e652939f..0698e2a76434 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -102,7 +102,6 @@ static void otx2_flr_handler(struct work_struct *work)
struct flr_work *flrwork = container_of(work, struct flr_work, work);
struct otx2_nic *pf = flrwork->pf;
struct msg_req *req;
- struct msg_rsp *rsp;
int vf, reg = 0;
vf = flrwork - pf->flr_wrk;
@@ -122,11 +121,6 @@ static void otx2_flr_handler(struct work_struct *work)
reg = 1;
vf = vf - 64;
}
- rsp = (struct msg_rsp *)
- otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
- otx2_mbox_unlock(&pf->mbox);
- if (rsp->hdr.rc)
- return;
/* clear transcation pending bit */
otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
@@ -377,9 +371,7 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
src_mdev = &src_mbox->dev[vf];
mbox_hdr = src_mbox->hwbase +
src_mbox->rx_start + (vf * MBOX_SIZE);
- req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
- src_mbox->rx_start);
- req_hdr->num_msgs = num_msgs;
+
dst_mbox = &pf->mbox;
dst_size = dst_mbox->mbox.tx_size -
ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
@@ -392,7 +384,7 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
otx2_mbox_lock(&pf->mbox);
dst_mdev->mbase = src_mdev->mbase;
dst_mdev->msg_size = mbox_hdr->msg_size;
- dst_mdev->num_msgs = mbox_hdr->num_msgs;
+ dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
if (err) {
dev_warn(pf->dev,
@@ -829,10 +821,6 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
}
otx2_mbox_reset(mbox, 0);
-
- /* Clear the IRQ */
- smp_wmb();
- otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
}
static void otx2_handle_link_event(struct otx2_nic *pf)
@@ -1384,7 +1372,8 @@ err_free_nix_queues:
otx2_free_cq_res(pf);
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
err_free_txsch:
- otx2_txschq_stop(pf);
+ if (otx2_txschq_stop(pf))
+ dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
err_free_sq_ptrs:
otx2_sq_free_sqbs(pf);
err_free_rq_ptrs:
@@ -1397,13 +1386,16 @@ err_free_nix_lf:
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (free_req) {
free_req->flags = NIX_LF_DISABLE_FLOWS;
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
err_free_npa_lf:
/* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
- if (req)
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
exit:
otx2_mbox_unlock(mbox);
return err;
@@ -1453,7 +1445,8 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (free_req) {
free_req->flags = NIX_LF_DISABLE_FLOWS;
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
otx2_mbox_unlock(mbox);
@@ -1465,8 +1458,10 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_mbox_lock(mbox);
/* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
- if (req)
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
otx2_mbox_unlock(mbox);
}
@@ -2271,11 +2266,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* So the only way to convert Rx packet's buffer address is to use
* IOMMU's iova_to_phys() handler which translates the address by
* walking through the translation tables.
- *
- * So check if device is binded to IOMMU, otherwise translation is
- * not needed.
*/
pf->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (pf->iommu_domain)
+ pf->iommu_domain_type =
+ ((struct iommu_domain *)pf->iommu_domain)->type;
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index e84cf3cc67fd..84271cddd645 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -145,7 +145,9 @@
#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 179d90a761c5..6864fa315900 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -61,8 +61,8 @@ static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
offset = frag->page_offset;
*len = skb_frag_size(frag);
}
- return dma_map_page_attrs(pfvf->dev, page, offset, *len,
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ return otx2_dma_map_page(pfvf, page, offset, *len,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
@@ -70,9 +70,9 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
int seg;
for (seg = 0; seg < sg->num_segs; seg++) {
- dma_unmap_page_attrs(pfvf->dev, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
sg->num_segs = 0;
}
@@ -215,8 +215,8 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
va - page_address(page) + off, len - off, RCV_FRAG_LEN);
- dma_unmap_page_attrs(pfvf->dev, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
@@ -983,10 +983,10 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
*iova -= OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain,
*iova);
- dma_unmap_page_attrs(pfvf->dev, *iova,
- RCV_FRAG_LEN,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, *iova,
+ RCV_FRAG_LEN,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
put_page(virt_to_page(phys_to_virt(pa)));
iova++;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 129c693e62aa..218ce0382adb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -132,10 +132,6 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
}
otx2_mbox_reset(mbox, 0);
-
- /* Clear the IRQ */
- smp_wmb();
- otx2_write64(af_mbox->pfvf, RVU_VF_INT, BIT_ULL(0));
}
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
@@ -535,6 +531,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vf->pdev = pdev;
vf->dev = dev;
vf->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (vf->iommu_domain)
+ vf->iommu_domain_type =
+ ((struct iommu_domain *)vf->iommu_domain)->type;
+
vf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &vf->hw;
hw->pdev = vf->pdev;