Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c')
-rw-r--r-- | drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 1772
1 file changed, 1398 insertions, 374 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4a7609fd6dd0..55304fa55643 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -16,8 +16,12 @@
 #include "rvu.h"
 #include "npc.h"
 #include "cgx.h"
+#include "rvu_fixes.h"
 
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+			    int type, int chan_id);
 
 enum mc_tbl_sz {
 	MC_TBL_SZ_256,
@@ -64,7 +68,6 @@ enum nix_makr_fmt_indexes {
 
 struct mce {
 	struct hlist_node	node;
-	u16			idx;
 	u16			pcifunc;
 };
 
@@ -128,16 +131,17 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
 	if (err)
 		dev_err(rvu->dev, "NIX RX software sync failed\n");
 
-	/* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
+	/* As per a HW errata in 96xx A0 silicon, HW may clear SW_SYNC[ENA]
 	 * bit too early. Hence wait for 50us more.
 	 */
-	if (is_rvu_9xxx_A0(rvu))
+	if (is_rvu_96xx_A0(rvu))
 		usleep_range(50, 60);
 }
 
 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
 {
+	struct rvu_hwinfo *hw = rvu->hw;
 	struct nix_txsch *txsch;
 	struct nix_hw *nix_hw;
 	u16 map_func;
@@ -155,13 +159,15 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
 	mutex_unlock(&rvu->rsrc_lock);
 
-	/* For TL1 schq, sharing across VF's of same PF is ok */
-	if (lvl == NIX_TXSCH_LVL_TL1 &&
-	    rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
-		return false;
+	/* TLs aggregating traffic are shared across PF and VFs */
+	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+			return false;
+		else
+			return true;
+	}
 
-	if (lvl != NIX_TXSCH_LVL_TL1 &&
-	    map_func != pcifunc)
+	if (map_func != pcifunc)
 		return false;
 
 	return true;
@@ -175,7 +181,8 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
 	int err;
 
 	pf = rvu_get_pf(pcifunc);
-	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+	    type != NIX_INTF_TYPE_SDP)
 		return 0;
 
 	switch (type) {
@@ -195,9 +202,19 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
 		pfvf->tx_chan_cnt = 1;
 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
 		rvu_npc_set_pkind(rvu, pkind, pfvf);
+
+		/* By default we enable pause frames */
+		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
+			cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
+					       lmac_id, true, true);
 		break;
 	case NIX_INTF_TYPE_LBK:
 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+
+		/* Note that AF's VFs work in pairs and talk over consecutive
+		 * loopback channels. Therefore if an odd number of AF VFs are
+		 * enabled then the last VF remains with no pair.
+		 */
 		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
 		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
						NIX_CHAN_LBK_CHX(0, vf + 1);
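/* Illustrative aside (not part of the patch): the LBK pairing rule above can
 * be sanity-checked in isolation. lbk_chan() is a hypothetical stand-in for
 * NIX_CHAN_LBK_CHX(0, vf); only the vf +/- 1 swap is the point here.
 */
#include <stdio.h>

static int lbk_chan(int vf) { return vf; }	/* assumed channel numbering */

int main(void)
{
	for (int vf = 0; vf < 4; vf++) {
		int rx = lbk_chan(vf);
		int tx = (vf & 0x1) ? lbk_chan(vf - 1) : lbk_chan(vf + 1);
		printf("AF VF%d: rx chan %d, tx chan %d\n", vf, rx, tx);
	}
	return 0;	/* VF0<->VF1 and VF2<->VF3 pair up; an odd VF count
			 * leaves the last VF unpaired, as the comment notes */
}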
@@ -206,6 +223,15 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base, false);
 		break;
+	case NIX_INTF_TYPE_SDP:
+		/* Added single interface and single channel support for now */
+		pfvf->rx_chan_base = NIX_CHAN_SDP_CHX(0);
+		pfvf->tx_chan_base = pfvf->rx_chan_base;
+		pfvf->rx_chan_cnt = 1;
+		pfvf->tx_chan_cnt = 1;
+		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+					      pfvf->rx_chan_base, false);
+		break;
 	}
 
 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
@@ -238,7 +264,6 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
 
 	pfvf->maxlen = 0;
 	pfvf->minlen = 0;
-	pfvf->rxvlan = false;
 
 	/* Remove this PF_FUNC from bcast pkt replication list */
 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
@@ -248,8 +273,147 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
			pcifunc);
 	}
 
-	/* Free and disable any MCAM entries used by this NIX LF */
-	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+	/* Free any tx vtag def entries used by this NIX LF */
+	nix_free_tx_vtag_entries(rvu, pcifunc);
+
+	/* Disable DMAC filters used */
+	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
+}
+
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+				    struct nix_bp_cfg_req *req,
+				    struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_pfvf *pfvf;
+	int blkaddr, pf, type;
+	u16 chan_base, chan;
+	u64 cfg;
+
+	pf = rvu_get_pf(pcifunc);
+	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+		return 0;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+	chan_base = pfvf->rx_chan_base + req->chan_base;
+	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+			    cfg & ~BIT_ULL(16));
+	}
+	return 0;
+}
+
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+			    int type, int chan_id)
+{
+	int bpid, blkaddr, lmac_chan_cnt;
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 cgx_bpid_cnt, lbk_bpid_cnt;
+	struct rvu_pfvf *pfvf;
+	u8 cgx_id, lmac_id;
+	u64 cfg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+	lmac_chan_cnt = cfg & 0xFF;
+
+	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
+	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+	/* Backpressure IDs range division
+	 * CGX channels are mapped to (0 - 191) BPIDs
+	 * LBK channels are mapped to (192 - 255) BPIDs
+	 * SDP channels are mapped to (256 - 511) BPIDs
+	 *
+	 * Lmac channels and bpids mapped as follows
+	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
+	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
+	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
+	 */
+	switch (type) {
+	case NIX_INTF_TYPE_CGX:
+		if ((req->chan_base + req->chan_cnt) > 15)
+			return -EINVAL;
+		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+		/* Assign bpid based on cgx, lmac and chan id */
+		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
+			(lmac_id * lmac_chan_cnt) + req->chan_base;
+
+		if (req->bpid_per_chan)
+			bpid += chan_id;
+		if (bpid > cgx_bpid_cnt)
+			return -EINVAL;
+		break;
+
+	case NIX_INTF_TYPE_LBK:
+		if ((req->chan_base + req->chan_cnt) > 63)
+			return -EINVAL;
+		bpid = cgx_bpid_cnt + req->chan_base;
+		if (req->bpid_per_chan)
+			bpid += chan_id;
+		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return bpid;
+}
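/* Illustrative aside (not part of the patch): the CGX BPID arithmetic from
 * rvu_nix_get_bpid() above, evaluated standalone. In the driver,
 * lmac_per_cgx and lmac_chan_cnt come from hardware (NIX_AF_CONST); the
 * values used here are assumptions matching the comment's example ranges.
 */
#include <stdio.h>

int main(void)
{
	int lmac_per_cgx = 4, lmac_chan_cnt = 16;

	for (int cgx = 0; cgx < 2; cgx++)
		for (int lmac = 0; lmac < 2; lmac++) {
			/* bpid = cgx * lmacs * chans + lmac * chans + chan */
			int bpid = cgx * lmac_per_cgx * lmac_chan_cnt +
				   lmac * lmac_chan_cnt + 0;
			printf("cgx(%d)_lmac(%d)_chan(0) -> bpid %d\n",
			       cgx, lmac, bpid);
		}
	return 0;	/* prints 0, 16, 64, 80 -- matching bpid(0 - 15),
			 * bpid(16 - 31) and bpid(64 - 79) above */
}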
+
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+				   struct nix_bp_cfg_req *req,
+				   struct nix_bp_cfg_rsp *rsp)
+{
+	int blkaddr, pf, type, chan_id = 0;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_pfvf *pfvf;
+	u16 chan_base, chan;
+	s16 bpid, bpid_base;
+	u64 cfg;
+
+	pf = rvu_get_pf(pcifunc);
+	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+	/* Enable backpressure only for CGX mapped PFs and LBK interface */
+	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+		return 0;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
+	chan_base = pfvf->rx_chan_base + req->chan_base;
+	bpid = bpid_base;
+
+	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+		if (bpid < 0) {
+			dev_warn(rvu->dev, "Failed to enable backpressure\n");
+			return -EINVAL;
+		}
+
+		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+			    cfg | (bpid & 0xFF) | BIT_ULL(16));
+		chan_id++;
+		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
+	}
+
+	for (chan = 0; chan < req->chan_cnt; chan++) {
+		/* Map channel and bpid assigned to it */
+		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
+		if (req->bpid_per_chan)
+			bpid_base++;
+	}
+	rsp->chan_cnt = req->chan_cnt;
+
+	return 0;
 }
 
 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
@@ -382,7 +546,8 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
 
 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
-			      int rss_sz, int rss_grps, int hwctx_size)
+			      int rss_sz, int rss_grps, int hwctx_size,
+			      u64 way_mask)
 {
 	int err, grp, num_indices;
 
@@ -402,7 +567,8 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
 	/* Config full RSS table size, enable RSS and caching */
 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
-		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
+		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
+		    way_mask << 20);
 	/* Config RSS group offset and sizes */
 	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
@@ -530,6 +696,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
+		rvu_nix_update_sq_smq_mapping(rvu, blkaddr, nixlf, req->qidx,
+					      req->sq.smq);
 	}
 
 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
@@ -542,6 +710,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
	 */
 	inst.res_addr = (u64)aq->res->iova;
 
+	/* Hardware uses the same aq->res->base for updating the result of
+	 * the previous instruction, hence wait here till it is
done. + */ + spin_lock(&aq->lock); + /* Clean result + context memory */ memset(aq->res->base, 0, aq->res->entry_sz); /* Context needs to be written at RES_ADDR + 128 */ @@ -586,11 +759,10 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, break; default: rc = NIX_AF_ERR_AQ_ENQUEUE; + spin_unlock(&aq->lock); return rc; } - spin_lock(&aq->lock); - /* Submit the instruction to AQ */ rc = nix_aq_enqueue_wait(rvu, block, &inst); if (rc) { @@ -663,6 +835,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, return 0; } +static const char *nix_get_ctx_name(int ctype) +{ + switch (ctype) { + case NIX_AQ_CTYPE_CQ: + return "CQ"; + case NIX_AQ_CTYPE_SQ: + return "SQ"; + case NIX_AQ_CTYPE_RQ: + return "RQ"; + case NIX_AQ_CTYPE_RSS: + return "RSS"; + } + return ""; +} + static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); @@ -680,6 +867,8 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) if (req->ctype == NIX_AQ_CTYPE_CQ) { aq_req.cq.ena = 0; aq_req.cq_mask.ena = 1; + aq_req.cq.bp_ena = 0; + aq_req.cq_mask.bp_ena = 1; q_cnt = pfvf->cq_ctx->qsize; bmap = pfvf->cq_bmap; } @@ -707,21 +896,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) if (rc) { err = rc; dev_err(rvu->dev, "Failed to disable %s:%d context\n", - (req->ctype == NIX_AQ_CTYPE_CQ) ? - "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ? - "RQ" : "SQ"), qidx); + nix_get_ctx_name(req->ctype), qidx); } } return err; } +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING +static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) +{ + struct nix_aq_enq_req lock_ctx_req; + int err; + + if (req->op != NIX_AQ_INSTOP_INIT) + return 0; + + if (req->ctype == NIX_AQ_CTYPE_MCE || + req->ctype == NIX_AQ_CTYPE_DYNO) + return 0; + + memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); + lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; + lock_ctx_req.ctype = req->ctype; + lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; + lock_ctx_req.qidx = req->qidx; + err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); + if (err) + dev_err(rvu->dev, + "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", + req->hdr.pcifunc, + nix_get_ctx_name(req->ctype), req->qidx); + return err; +} + +int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + int err; + + err = rvu_nix_aq_enq_inst(rvu, req, rsp); + if (!err) + err = nix_lf_hwctx_lockdown(rvu, req); + return err; +} +#else + int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { return rvu_nix_aq_enq_inst(rvu, req, rsp); } +#endif int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, @@ -735,16 +963,20 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, struct nix_lf_alloc_rsp *rsp) { int nixlf, qints, hwctx_size, intf, err, rc = 0; + struct rvu_pfvf *pfvf, *parent_pf; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; - struct rvu_pfvf *pfvf; u64 cfg, ctx_cfg; int blkaddr; if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) return NIX_AF_ERR_PARAM; + if (req->way_mask) + req->way_mask &= 0xFFFF; + + parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) @@ -810,7 +1042,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, 
(u64)pfvf->rq_ctx->iova); /* Set caching and queue count in HW */ - cfg = BIT_ULL(36) | (req->rq_cnt - 1); + cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); /* Alloc NIX SQ HW context memory and config the base */ @@ -825,7 +1057,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), (u64)pfvf->sq_ctx->iova); - cfg = BIT_ULL(36) | (req->sq_cnt - 1); + + cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); /* Alloc NIX CQ HW context memory and config the base */ @@ -840,13 +1073,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), (u64)pfvf->cq_ctx->iova); - cfg = BIT_ULL(36) | (req->cq_cnt - 1); + + cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); /* Initialize receive side scaling (RSS) */ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); - err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, - req->rss_sz, req->rss_grps, hwctx_size); + err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, + req->rss_grps, hwctx_size, req->way_mask); if (err) goto free_mem; @@ -860,7 +1094,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), (u64)pfvf->cq_ints_ctx->iova); - rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36)); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), + BIT_ULL(36) | req->way_mask << 20); /* Alloc memory for QINT's HW contexts */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); @@ -872,7 +1108,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), (u64)pfvf->nix_qints_ctx->iova); - rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36)); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), + BIT_ULL(36) | req->way_mask << 20); /* Setup VLANX TPID's. * Use VLAN1 for 802.1Q @@ -896,7 +1133,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, /* Config Rx pkt length, csum checks and apad enable / disable */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); + /* Configure pkind for TX parse config, 63 from npc_profile */ + cfg = NPC_TX_DEF_PKIND; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); + intf = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (is_sdp_pf(pcifunc)) + intf = NIX_INTF_TYPE_SDP; + err = nix_interface_init(rvu, pcifunc, intf, nixlf); if (err) goto free_mem; @@ -931,10 +1175,11 @@ exit: cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); rsp->qints = ((cfg >> 12) & 0xFFF); rsp->cints = ((cfg >> 24) & 0xFFF); + rsp->hw_rx_tstamp_en = parent_pf->hw_rx_tstamp_en; return rc; } -int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -953,6 +1198,11 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + if (req->flags & NIX_LF_DISABLE_FLOWS) + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + else + rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); + nix_interface_deinit(rvu, pcifunc, nixlf); /* Reset this NIX LF */ @@ -1048,6 +1298,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, struct rvu_hwinfo *hw = rvu->hw; int link; + if (lvl >= hw->cap.nix_tx_aggr_lvl) + return; + /* Reset TL4's SDP link config */ if (lvl == NIX_TXSCH_LVL_TL4) rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); @@ -1061,83 +1314,185 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } -static int -rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc, - u16 *schq_list, u16 *schq_cnt) +static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { - struct nix_txsch *txsch; - struct nix_hw *nix_hw; - struct rvu_pfvf *pfvf; - u8 cgx_id, lmac_id; - u16 schq_base; - u32 *pfvf_map; - int pf, intf; + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return -ENODEV; + if (is_afvf(pcifunc)) {/* LBK links */ + return hw->cgx_links; + } else if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return (cgx_id * hw->lmac_per_cgx) + lmac_id; + } - pfvf = rvu_get_pfvf(rvu, pcifunc); - txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; - pfvf_map = txsch->pfvf_map; - pf = rvu_get_pf(pcifunc); + /* SDP link */ + return hw->cgx_links + hw->lbk_links; +} - /* static allocation as two TL1's per link */ - intf = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
+				 int link, int *start, int *end)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int pf = rvu_get_pf(pcifunc);
 
-	switch (intf) {
-	case NIX_INTF_TYPE_CGX:
-		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
-		schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
-		break;
-	case NIX_INTF_TYPE_LBK:
-		schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
-		break;
-	default:
-		return -ENODEV;
+	if (is_afvf(pcifunc)) { /* LBK links */
+		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
+		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
+	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
+		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
+		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
+	} else { /* SDP link */
+		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
+			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
+		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
 	}
+}
 
-	if (schq_base + 1 > txsch->schq.max)
-		return -ENODEV;
+static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+				      struct nix_hw *nix_hw,
+				      struct nix_txsch_alloc_req *req)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int schq, req_schq, free_cnt;
+	struct nix_txsch *txsch;
+	int link, start, end;
 
-	/* init pfvf_map as we store flags */
-	if (pfvf_map[schq_base] == U32_MAX) {
-		pfvf_map[schq_base] =
-			TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
-		pfvf_map[schq_base + 1] =
-			TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+	txsch = &nix_hw->txsch[lvl];
+	req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+	if (!req_schq)
+		return 0;
 
-		/* Onetime reset for TL1 */
-		nix_reset_tx_linkcfg(rvu, blkaddr,
-				     NIX_TXSCH_LVL_TL1, schq_base);
-		nix_reset_tx_shaping(rvu, blkaddr,
-				     NIX_TXSCH_LVL_TL1, schq_base);
+	link = nix_get_tx_link(rvu, pcifunc);
 
-		nix_reset_tx_linkcfg(rvu, blkaddr,
-				     NIX_TXSCH_LVL_TL1, schq_base + 1);
-		nix_reset_tx_shaping(rvu, blkaddr,
-				     NIX_TXSCH_LVL_TL1, schq_base + 1);
+	/* For traffic aggregating scheduler level, one queue is enough */
+	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+		if (req_schq != 1)
+			return NIX_AF_ERR_TLX_ALLOC_FAIL;
+		return 0;
 	}
 
-	if (schq_list && schq_cnt) {
-		schq_list[0] = schq_base;
-		schq_list[1] = schq_base + 1;
-		*schq_cnt = 2;
+	/* Get free SCHQ count and check if request can be accommodated */
+	if (hw->cap.nix_fixed_txschq_mapping) {
+		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
+		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
+		if (end <= txsch->schq.max && schq < end &&
+		    !test_bit(schq, txsch->schq.bmap))
+			free_cnt = 1;
+		else
+			free_cnt = 0;
+	} else {
+		free_cnt = rvu_rsrc_free_count(&txsch->schq);
 	}
 
+	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+		return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
+	/* If contiguous queues are needed, check for availability */
+	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
+	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+		return NIX_AF_ERR_TLX_ALLOC_FAIL;
+
 	return 0;
 }
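/* Illustrative aside (not part of the patch): under the fixed-mapping
 * capability checked above, a PF_FUNC owns exactly one slot per level,
 * at start + (pcifunc & FUNC_MASK) inside its link's window, so free_cnt
 * can only ever be 0 or 1. The window bounds and id below are hypothetical.
 */
#include <stdio.h>

#define FUNC_MASK 0x3FF	/* stand-in for RVU_PFVF_FUNC_MASK */

int main(void)
{
	int start = 32, end = 48;	/* assumed schq window for one link */
	unsigned int pcifunc = 0x0403;	/* hypothetical id, FUNC bits = 3 */
	int schq = start + (int)(pcifunc & FUNC_MASK);

	printf("PF_FUNC 0x%x -> fixed schq %d (%s)\n", pcifunc, schq,
	       schq < end ? "grantable" : "out of range");
	return 0;
}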
+
+static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
+			    struct nix_txsch_alloc_rsp *rsp,
+			    int lvl, int start, int end)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = rsp->hdr.pcifunc;
+	int idx, schq;
+
+	/* For traffic aggregating levels, queue alloc is based
+	 * on the transmit link to which the PF_FUNC is mapped.
+	 */
+	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+		/* A single TL queue is allocated */
+		if (rsp->schq_contig[lvl]) {
+			rsp->schq_contig[lvl] = 1;
+			rsp->schq_contig_list[lvl][0] = start;
+		}
+
+		/* Both contig and non-contig reqs don't make sense here */
+		if (rsp->schq_contig[lvl])
+			rsp->schq[lvl] = 0;
+
+		if (rsp->schq[lvl]) {
+			rsp->schq[lvl] = 1;
+			rsp->schq_list[lvl][0] = start;
+		}
+		return;
+	}
+
+	/* Adjust the queue request count if HW supports
+	 * only one queue per level configuration.
+	 */
+	if (hw->cap.nix_fixed_txschq_mapping) {
+		idx = pcifunc & RVU_PFVF_FUNC_MASK;
+		schq = start + idx;
+		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
+			rsp->schq_contig[lvl] = 0;
+			rsp->schq[lvl] = 0;
+			return;
+		}
+
+		if (rsp->schq_contig[lvl]) {
+			rsp->schq_contig[lvl] = 1;
+			set_bit(schq, txsch->schq.bmap);
+			rsp->schq_contig_list[lvl][0] = schq;
+			rsp->schq[lvl] = 0;
+		} else if (rsp->schq[lvl]) {
+			rsp->schq[lvl] = 1;
+			set_bit(schq, txsch->schq.bmap);
+			rsp->schq_list[lvl][0] = schq;
+		}
+		return;
+	}
+
+	/* Allocate contiguous queue indices requested first */
+	if (rsp->schq_contig[lvl]) {
+		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
+						  txsch->schq.max, start,
+						  rsp->schq_contig[lvl], 0);
+		if (schq >= end)
+			rsp->schq_contig[lvl] = 0;
+		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
+			set_bit(schq, txsch->schq.bmap);
+			rsp->schq_contig_list[lvl][idx] = schq;
+			schq++;
+		}
+	}
+
+	/* Allocate non-contiguous queue indices */
+	if (rsp->schq[lvl]) {
+		idx = 0;
+		for (schq = start; schq < end; schq++) {
+			if (!test_bit(schq, txsch->schq.bmap)) {
+				set_bit(schq, txsch->schq.bmap);
+				rsp->schq_list[lvl][idx++] = schq;
+			}
+			if (idx == rsp->schq[lvl])
+				break;
+		}
+		/* Update how many were allocated */
+		rsp->schq[lvl] = idx;
+	}
+}
+
 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
 {
+	struct rvu_hwinfo *hw = rvu->hw;
 	u16 pcifunc = req->hdr.pcifunc;
+	int link, blkaddr, rc = 0;
+	int lvl, idx, start, end;
 	struct nix_txsch *txsch;
-	int lvl, idx, req_schq;
 	struct rvu_pfvf *pfvf;
 	struct nix_hw *nix_hw;
-	int blkaddr, rc = 0;
 	u32 *pfvf_map;
 	u16 schq;
 
@@ -1151,83 +1506,74 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
		return -EINVAL;
 
 	mutex_lock(&rvu->rsrc_lock);
-	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
-		txsch = &nix_hw->txsch[lvl];
-		req_schq = req->schq_contig[lvl] + req->schq[lvl];
-		pfvf_map = txsch->pfvf_map;
-
-		if (!req_schq)
-			continue;
-
-		/* There are only 28 TL1s */
-		if (lvl == NIX_TXSCH_LVL_TL1) {
-			if (req->schq_contig[lvl] ||
-			    req->schq[lvl] > 2 ||
-			    rvu_get_tl1_schqs(rvu, blkaddr,
-					      pcifunc, NULL, NULL))
-				goto err;
-			continue;
-		}
-
-		/* Check if request is valid */
-		if (req_schq > MAX_TXSCHQ_PER_FUNC)
-			goto err;
-
-		/* If contiguous queues are needed, check for availability */
-		if (req->schq_contig[lvl] &&
-		    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
-			goto err;
+	/* Check if request can be accommodated as per limits set by admin */
+	if (!hw->cap.nix_fixed_txschq_mapping &&
+	    rvu_check_txsch_policy(rvu, req, pcifunc)) {
+		dev_err(rvu->dev, "Func 0x%x: TXSCH policy check failed\n",
+			pcifunc);
+		goto err;
+	}
 
-		/* Check if full request can be accommodated */
-		if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
+	/* Check if request is valid as per HW capabilities
+	 * and can be accommodated.
+ */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); + if (rc) goto err; } + /* Allocate requested Tx scheduler queues */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; - rsp->schq_contig[lvl] = req->schq_contig[lvl]; pfvf_map = txsch->pfvf_map; - rsp->schq[lvl] = req->schq[lvl]; if (!req->schq[lvl] && !req->schq_contig[lvl]) continue; - /* Handle TL1 specially as it is - * allocation is restricted to 2 TL1's - * per link - */ + rsp->schq[lvl] = req->schq[lvl]; + rsp->schq_contig[lvl] = req->schq_contig[lvl]; - if (lvl == NIX_TXSCH_LVL_TL1) { - rsp->schq_contig[lvl] = 0; - rvu_get_tl1_schqs(rvu, blkaddr, pcifunc, - &rsp->schq_list[lvl][0], - &rsp->schq[lvl]); - continue; + link = nix_get_tx_link(rvu, pcifunc); + + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + start = link; + end = link; + } else if (hw->cap.nix_fixed_txschq_mapping) { + nix_get_txschq_range(rvu, pcifunc, link, &start, &end); + } else { + start = 0; + end = txsch->schq.max; } - /* Alloc contiguous queues first */ - if (req->schq_contig[lvl]) { - schq = rvu_alloc_rsrc_contig(&txsch->schq, - req->schq_contig[lvl]); + nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); - for (idx = 0; idx < req->schq_contig[lvl]; idx++) { + /* Reset queue config */ + for (idx = 0; idx < req->schq_contig[lvl]; idx++) { + schq = rsp->schq_contig_list[lvl][idx]; + if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & + NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); - nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); - rsp->schq_contig_list[lvl][idx] = schq; - schq++; - } + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); } - /* Alloc non-contiguous queues */ for (idx = 0; idx < req->schq[lvl]; idx++) { - schq = rvu_alloc_rsrc(&txsch->schq); - pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); + schq = rsp->schq_list[lvl][idx]; + if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & + NIX_TXSCHQ_CFG_DONE)) + pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); - rsp->schq_list[lvl][idx] = schq; } } + + rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; + rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; + rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, + NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? + NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; goto exit; err: rc = NIX_AF_ERR_TLX_ALLOC_FAIL; @@ -1236,13 +1582,52 @@ exit: return rc; } +static void nix_smq_flush(struct rvu *rvu, int blkaddr, + int smq, u16 pcifunc, int nixlf) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; + int err, restore_tx_en = 0; + u64 cfg; + + /* enable cgx tx if disabled */ + if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true); + } + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); + /* Do SMQ flush and set enqueue xoff */ + cfg |= BIT_ULL(50) | BIT_ULL(49); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); + + /* Disable backpressure from physical link, + * otherwise SMQ flush may stall. 
+ */ + rvu_cgx_enadis_rx_bp(rvu, pf, false); + + rvu_smqvf_xmit(rvu); + + /* Wait for flush to complete */ + err = rvu_poll_reg(rvu, blkaddr, + NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); + if (err) + dev_err(rvu->dev, + "NIXLF%d: SMQ%d flush failed\n", nixlf, smq); + + rvu_cgx_enadis_rx_bp(rvu, pf, true); + /* restore cgx tx state */ + if (restore_tx_en) + cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); +} + static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) { int blkaddr, nixlf, lvl, schq, err; struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; - u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1275,26 +1660,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - /* Do SMQ flush and set enqueue xoff */ - cfg |= BIT_ULL(50) | BIT_ULL(49); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - - /* Wait for flush to complete */ - err = rvu_poll_reg(rvu, blkaddr, - NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true); - if (err) { - dev_err(rvu->dev, - "NIXLF%d: SMQ%d flush failed\n", nixlf, schq); - } + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); } /* Now free scheduler queues to free pool */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - /* Free all SCHQ's except TL1 as - * TL1 is shared across all VF's for a RVU PF - */ - if (lvl == NIX_TXSCH_LVL_TL1) + /* TLs above aggregation level are shared across all PF + * and it's VFs, hence skip freeing them. + */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; txsch = &nix_hw->txsch[lvl]; @@ -1302,14 +1676,12 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; rvu_free_rsrc(&txsch->schq, schq); - txsch->pfvf_map[schq] = 0; + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } } mutex_unlock(&rvu->rsrc_lock); - /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ - rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); - err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); + err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC); if (err) dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); @@ -1319,13 +1691,12 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) static int nix_txschq_free_one(struct rvu *rvu, struct nix_txsch_free_req *req) { - int lvl, schq, nixlf, blkaddr, rc; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; + int lvl, schq, nixlf, blkaddr; struct nix_txsch *txsch; struct nix_hw *nix_hw; u32 *pfvf_map; - u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1343,10 +1714,8 @@ static int nix_txschq_free_one(struct rvu *rvu, schq = req->schq; txsch = &nix_hw->txsch[lvl]; - /* Don't allow freeing TL1 */ - if (lvl > NIX_TXSCH_LVL_TL2 || - schq >= txsch->schq.max) - goto err; + if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) + return 0; pfvf_map = txsch->pfvf_map; mutex_lock(&rvu->rsrc_lock); @@ -1359,24 +1728,12 @@ static int nix_txschq_free_one(struct rvu *rvu, /* Flush if it is a SMQ. 
Onus of disabling * TL2/3 queue links before SMQ flush is on user */ - if (lvl == NIX_TXSCH_LVL_SMQ) { - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - /* Do SMQ flush and set enqueue xoff */ - cfg |= BIT_ULL(50) | BIT_ULL(49); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - - /* Wait for flush to complete */ - rc = rvu_poll_reg(rvu, blkaddr, - NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true); - if (rc) { - dev_err(rvu->dev, - "NIXLF%d: SMQ%d flush failed\n", nixlf, schq); - } - } + if (lvl == NIX_TXSCH_LVL_SMQ) + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); /* Free the resource */ rvu_free_rsrc(&txsch->schq, schq); - txsch->pfvf_map[schq] = 0; + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); mutex_unlock(&rvu->rsrc_lock); return 0; err: @@ -1393,8 +1750,8 @@ int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, return nix_txschq_free_one(rvu, req); } -static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, - int lvl, u64 reg, u64 regval) +static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, + int lvl, u64 reg, u64 regval) { u64 regbase = reg & 0xFFFF; u16 schq, parent; @@ -1431,79 +1788,82 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, return true; } -static int -nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc) +static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) { - u16 schq_list[2], schq_cnt, schq; - int blkaddr, idx, err = 0; - u16 map_func, map_flags; - struct nix_hw *nix_hw; - u64 reg, regval; - u32 *pfvf_map; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; - - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return -EINVAL; - - pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; - - mutex_lock(&rvu->rsrc_lock); + u64 regbase; - err = rvu_get_tl1_schqs(rvu, blkaddr, - pcifunc, schq_list, &schq_cnt); - if (err) - goto unlock; + if (hw->cap.nix_shaping) + return true; - for (idx = 0; idx < schq_cnt; idx++) { - schq = schq_list[idx]; - map_func = TXSCH_MAP_FUNC(pfvf_map[schq]); - map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]); + /* If shaping and coloring is not supported, then + * *_CIR and *_PIR registers should not be configured. 
+ */ + regbase = reg & 0xFFFF; - /* check if config is already done or this is pf */ - if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE) - continue; + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + if (regbase == NIX_AF_TL1X_CIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL2: + if (regbase == NIX_AF_TL2X_CIR(0) || + regbase == NIX_AF_TL2X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL3: + if (regbase == NIX_AF_TL3X_CIR(0) || + regbase == NIX_AF_TL3X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL4: + if (regbase == NIX_AF_TL4X_CIR(0) || + regbase == NIX_AF_TL4X_PIR(0)) + return false; + break; + } + return true; +} - /* default configuration */ - reg = NIX_AF_TL1X_TOPOLOGY(schq); - regval = (TXSCH_TL1_DFLT_RR_PRIO << 1); - rvu_write64(rvu, blkaddr, reg, regval); - reg = NIX_AF_TL1X_SCHEDULE(schq); - regval = TXSCH_TL1_DFLT_RR_QTM; - rvu_write64(rvu, blkaddr, reg, regval); - reg = NIX_AF_TL1X_CIR(schq); - regval = 0; - rvu_write64(rvu, blkaddr, reg, regval); +static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, + u16 pcifunc, int blkaddr) +{ + u32 *pfvf_map; + int schq; - map_flags |= NIX_TXSCHQ_TL1_CFG_DONE; - pfvf_map[schq] = TXSCH_MAP(map_func, map_flags); - } -unlock: - mutex_unlock(&rvu->rsrc_lock); - return err; + schq = nix_get_tx_link(rvu, pcifunc); + pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; + /* Skip if PF has already done the config */ + if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) + return; + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), + (TXSCH_TL1_DFLT_RR_PRIO << 1)); + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + TXSCH_TL1_DFLT_RR_QTM); + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); + pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp) { - u16 schq, pcifunc = req->hdr.pcifunc; struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; u64 reg, regval, schq_regbase; struct nix_txsch *txsch; - u16 map_func, map_flags; struct nix_hw *nix_hw; int blkaddr, idx, err; + int nixlf, schq; u32 *pfvf_map; - int nixlf; if (req->lvl >= NIX_TXSCH_LVL_CNT || req->num_regs > MAX_REGS_PER_MBOX_MSG) return NIX_AF_INVAL_TXSCHQ_CFG; + err = nix_get_nixlf(rvu, pcifunc, &nixlf); + if (err) + return NIX_AF_ERR_AF_LF_INVALID; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; @@ -1512,29 +1872,33 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, if (!nix_hw) return -EINVAL; - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; - txsch = &nix_hw->txsch[req->lvl]; pfvf_map = txsch->pfvf_map; - /* VF is only allowed to trigger - * setting default cfg on TL1 - */ - if (pcifunc & RVU_PFVF_FUNC_MASK && - req->lvl == NIX_TXSCH_LVL_TL1) { - return nix_tl1_default_cfg(rvu, pcifunc); + if (req->lvl >= hw->cap.nix_tx_aggr_lvl && + pcifunc & RVU_PFVF_FUNC_MASK) { + mutex_lock(&rvu->rsrc_lock); + if (req->lvl == NIX_TXSCH_LVL_TL1) + nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); + mutex_unlock(&rvu->rsrc_lock); + return 0; } + rvu_nix_txsch_lock(nix_hw); for (idx = 0; idx < req->num_regs; idx++) { reg = req->reg[idx]; regval = req->regval[idx]; schq_regbase = reg & 0xFFFF; - if (!is_txschq_config_valid(rvu, pcifunc, blkaddr, - txsch->lvl, reg, regval)) + if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, + txsch->lvl, reg, regval)) { + rvu_nix_txsch_unlock(nix_hw); 
return NIX_AF_INVAL_TXSCHQ_CFG; + } + + /* Check if shaping and coloring is supported */ + if (!is_txschq_shaping_valid(hw, req->lvl, reg)) + continue; /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ if (schq_regbase == NIX_AF_SMQX_CFG(0)) { @@ -1544,32 +1908,38 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, regval |= ((u64)nixlf << 24); } + /* Clear 'BP_ENA' config, if it's not allowed */ + if (!hw->cap.nix_tx_link_bp) { + if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || + (schq_regbase & 0xFF00) == + NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) + regval &= ~BIT_ULL(13); + } + /* Mark config as done for TL1 by PF */ if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); - mutex_lock(&rvu->rsrc_lock); - - map_func = TXSCH_MAP_FUNC(pfvf_map[schq]); - map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]); - - map_flags |= NIX_TXSCHQ_TL1_CFG_DONE; - pfvf_map[schq] = TXSCH_MAP(map_func, map_flags); + pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], + NIX_TXSCHQ_CFG_DONE); mutex_unlock(&rvu->rsrc_lock); } - rvu_write64(rvu, blkaddr, reg, regval); - - /* Check for SMQ flush, if so, poll for its completion */ + /* SMQ flush is special hence split register writes such + * that flush first and write rest of the bits later. + */ if (schq_regbase == NIX_AF_SMQX_CFG(0) && (regval & BIT_ULL(49))) { - err = rvu_poll_reg(rvu, blkaddr, - reg, BIT_ULL(49), true); - if (err) - return NIX_AF_SMQ_FLUSH_FAILED; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + regval &= ~BIT_ULL(49); } + rvu_write64(rvu, blkaddr, reg, regval); } + + rvu_nix_txsch_config_changed(nix_hw); + rvu_nix_txsch_unlock(nix_hw); return 0; } @@ -1578,7 +1948,8 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, { u64 regval = req->vtag_size; - if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8) + if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || + req->vtag_size > VTAGSIZE_T8) return -EINVAL; if (req->rx.capture_vtag) @@ -1591,9 +1962,149 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, return 0; } +static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, + u16 pcifunc, int index) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan = &nix_hw->txvlan; + + if (vlan->entry2pfvf_map[index] != pcifunc) + return NIX_AF_ERR_PARAM; + + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); + + vlan->entry2pfvf_map[index] = 0; + rvu_free_rsrc(&vlan->rsrc, index); + + return 0; +} + +static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) +{ + struct nix_txvlan *vlan; + struct nix_hw *nix_hw; + int index, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + vlan = &nix_hw->txvlan; + + mutex_lock(&vlan->rsrc_lock); + /* Scan all the entries and free the ones mapped to 'pcifunc' */ + for (index = 0; index < vlan->rsrc.max; index++) { + if (vlan->entry2pfvf_map[index] == pcifunc) + nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); + } + mutex_unlock(&vlan->rsrc_lock); +} + +static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, + u64 vtag, u8 size) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan = &nix_hw->txvlan; + u64 regval; + int index; + + mutex_lock(&vlan->rsrc_lock); + + index = rvu_alloc_rsrc(&vlan->rsrc); + if (index 
< 0) { + mutex_unlock(&vlan->rsrc_lock); + return index; + } + + mutex_unlock(&vlan->rsrc_lock); + + regval = size ? vtag : vtag << 32; + + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_DATA(index), regval); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_CTL(index), size); + + return index; +} + +static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, + struct nix_vtag_config *req) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan = &nix_hw->txvlan; + u16 pcifunc = req->hdr.pcifunc; + int idx0 = req->tx.vtag0_idx; + int idx1 = req->tx.vtag1_idx; + int err = 0; + + if (req->tx.free_vtag0 && req->tx.free_vtag1) + if (vlan->entry2pfvf_map[idx0] != pcifunc || + vlan->entry2pfvf_map[idx1] != pcifunc) + return NIX_AF_ERR_PARAM; + + mutex_lock(&vlan->rsrc_lock); + + if (req->tx.free_vtag0) { + err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); + if (err) + goto exit; + } + + if (req->tx.free_vtag1) + err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); + +exit: + mutex_unlock(&vlan->rsrc_lock); + return err; +} + +static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, + struct nix_vtag_config *req, + struct nix_vtag_config_rsp *rsp) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan = &nix_hw->txvlan; + u16 pcifunc = req->hdr.pcifunc; + + if (req->tx.cfg_vtag0) { + rsp->vtag0_idx = + nix_tx_vtag_alloc(rvu, blkaddr, + req->tx.vtag0, req->vtag_size); + + if (rsp->vtag0_idx < 0) + return NIX_AF_ERR_TX_VTAG_NOSPC; + + vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; + } + + if (req->tx.cfg_vtag1) { + rsp->vtag1_idx = + nix_tx_vtag_alloc(rvu, blkaddr, + req->tx.vtag1, req->vtag_size); + + if (rsp->vtag1_idx < 0) + goto err_free; + + vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; + } + + return 0; + +err_free: + if (req->tx.cfg_vtag0) + nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); + + return NIX_AF_ERR_TX_VTAG_NOSPC; +} + int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, - struct msg_rsp *rsp) + struct nix_vtag_config_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; @@ -1608,12 +2119,21 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, return NIX_AF_ERR_AF_LF_INVALID; if (req->cfg_type) { + /* rx vtag configuration */ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); if (err) return NIX_AF_ERR_PARAM; } else { - /* TODO: handle tx vtag configuration */ - return 0; + /* tx vtag configuration */ + if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && + (req->tx.free_vtag0 || req->tx.free_vtag1)) + return NIX_AF_ERR_PARAM; + + if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) + return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); + + if (req->tx.free_vtag0 || req->tx.free_vtag1) + return nix_tx_vtag_decfg(rvu, blkaddr, req); } return 0; @@ -1650,7 +2170,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, } static int nix_update_mce_list(struct nix_mce_list *mce_list, - u16 pcifunc, int idx, bool add) + u16 pcifunc, bool add) { struct mce *mce, *tail = NULL; bool delete = false; @@ -1679,7 +2199,6 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, mce = kzalloc(sizeof(*mce), GFP_KERNEL); if (!mce) return -ENOMEM; - mce->idx = idx; mce->pcifunc = pcifunc; if (!tail) hlist_add_head(&mce->node, &mce_list->head); @@ -1691,12 +2210,12 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) { - int err = 0, idx, next_idx, count; + int err = 0, idx, next_idx, last_idx; 
 	struct nix_mce_list *mce_list;
-	struct mce *mce, *next_mce;
 	struct nix_mcast *mcast;
 	struct nix_hw *nix_hw;
 	struct rvu_pfvf *pfvf;
+	struct mce *mce;
 	int blkaddr;
 
 	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
@@ -1728,31 +2247,31 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
 
 	mutex_lock(&mcast->mce_lock);
 
-	err = nix_update_mce_list(mce_list, pcifunc, idx, add);
+	err = nix_update_mce_list(mce_list, pcifunc, add);
 	if (err)
		goto end;
 
 	/* Disable MCAM entry in NPC */
-
-	if (!mce_list->count)
+	if (!mce_list->count) {
+		rvu_npc_disable_bcast_entry(rvu, pcifunc);
		goto end;
-	count = mce_list->count;
+	}
 
 	/* Dump the updated list to HW */
+	idx = pfvf->bcast_mce_idx;
+	last_idx = idx + mce_list->count - 1;
 	hlist_for_each_entry(mce, &mce_list->head, node) {
-		next_idx = 0;
-		count--;
-		if (count) {
-			next_mce = hlist_entry(mce->node.next,
-					       struct mce, node);
-			next_idx = next_mce->idx;
-		}
+		if (idx > last_idx)
+			break;
+
+		next_idx = idx + 1;
		/* EOL should be set in last MCE */
-		err = nix_setup_mce(rvu, mce->idx,
-				    NIX_AQ_INSTOP_WRITE, mce->pcifunc,
-				    next_idx, count ? false : true);
+		err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
+				    mce->pcifunc, next_idx,
+				    (next_idx > last_idx) ? true : false);
		if (err)
			goto end;
+		idx++;
 	}
 
 end:
@@ -1846,11 +2365,36 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 	return nix_setup_bcast_tables(rvu, nix_hw);
 }
 
+static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+	struct nix_txvlan *vlan = &nix_hw->txvlan;
+	int err;
+
+	/* Allocate resource bitmap for tx vtag def registers */
+	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
+	err = rvu_alloc_bitmap(&vlan->rsrc);
+	if (err)
+		return -ENOMEM;
+
+	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
+					    sizeof(u16), GFP_KERNEL);
+	if (!vlan->entry2pfvf_map)
+		goto free_mem;
+
+	mutex_init(&vlan->rsrc_lock);
+	return 0;
+
+free_mem:
+	kfree(vlan->rsrc.bmap);
+	return -ENOMEM;
+}
+
 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 {
 	struct nix_txsch *txsch;
 	u64 cfg, reg;
-	int err, lvl;
+	int err, lvl, schq;
 
 	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
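/* Illustrative aside (not part of the patch): the rewritten MCE walk above
 * replaces per-node indices with a running index over the contiguous range
 * [bcast_mce_idx, bcast_mce_idx + count - 1], setting EOL only on the last
 * entry. The base index and count below are made-up example values.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int idx = 8, count = 3;	/* assumed pfvf->bcast_mce_idx and list size */
	int last_idx = idx + count - 1;

	for (; idx <= last_idx; idx++) {
		int next_idx = idx + 1;
		bool eol = next_idx > last_idx;	/* EOL set in last MCE only */
		printf("MCE[%d]: next=%d eol=%d\n", idx, next_idx, eol);
	}
	return 0;
}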
@@ -1888,7 +2432,8 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) sizeof(u32), GFP_KERNEL); if (!txsch->pfvf_map) return -ENOMEM; - memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32)); + for (schq = 0; schq < txsch->schq.max; schq++) + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } return 0; } @@ -2032,51 +2577,82 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) if (field_marker) memset(&tmp, 0, sizeof(tmp)); + field_marker = true; + keyoff_marker = true; switch (key_type) { case NIX_FLOW_KEY_TYPE_PORT: field->sel_chan = true; /* This should be set to 1, when SEL_CHAN is set */ field->bytesm1 = 1; - field_marker = true; - keyoff_marker = true; break; case NIX_FLOW_KEY_TYPE_IPV4: + case NIX_FLOW_KEY_TYPE_INNR_IPV4: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { + field->lid = NPC_LID_LG; + field->ltype_match = NPC_LT_LG_TU_IP; + } field->hdr_offset = 12; /* SIP offset */ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ field->ltype_mask = 0xF; /* Match only IPv4 */ - field_marker = true; keyoff_marker = false; break; case NIX_FLOW_KEY_TYPE_IPV6: + case NIX_FLOW_KEY_TYPE_INNR_IPV6: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP6; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { + field->lid = NPC_LID_LG; + field->ltype_match = NPC_LT_LG_TU_IP6; + } field->hdr_offset = 8; /* SIP offset */ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ field->ltype_mask = 0xF; /* Match only IPv6 */ - field_marker = true; - keyoff_marker = true; break; case NIX_FLOW_KEY_TYPE_TCP: case NIX_FLOW_KEY_TYPE_UDP: case NIX_FLOW_KEY_TYPE_SCTP: + case NIX_FLOW_KEY_TYPE_INNR_TCP: + case NIX_FLOW_KEY_TYPE_INNR_UDP: + case NIX_FLOW_KEY_TYPE_INNR_SCTP: field->lid = NPC_LID_LD; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || + key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) + field->lid = NPC_LID_LH; field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ - if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) { + + /* Enum values for NPC_LID_LD and NPC_LID_LG are same, + * so no need to change the ltype_match, just change + * the lid for inner protocols + */ + BUILD_BUG_ON((int)NPC_LT_LD_TCP != + (int)NPC_LT_LH_TU_TCP); + BUILD_BUG_ON((int)NPC_LT_LD_UDP != + (int)NPC_LT_LH_TU_UDP); + BUILD_BUG_ON((int)NPC_LT_LD_SCTP != + (int)NPC_LT_LH_TU_SCTP); + + if ((key_type == NIX_FLOW_KEY_TYPE_TCP || + key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && + valid_key) { field->ltype_match |= NPC_LT_LD_TCP; group_member = true; - } else if (key_type == NIX_FLOW_KEY_TYPE_UDP && + } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || + key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && valid_key) { field->ltype_match |= NPC_LT_LD_UDP; group_member = true; - } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP && + } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && valid_key) { field->ltype_match |= NPC_LT_LD_SCTP; group_member = true; } field->ltype_mask = ~field->ltype_match; - if (key_type == NIX_FLOW_KEY_TYPE_SCTP) { + if (key_type == NIX_FLOW_KEY_TYPE_SCTP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { /* Handle the case where any of the group item * is enabled in the group but not the final one */ @@ -2084,13 +2660,73 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) valid_key = true; group_member = false; } - field_marker = true; - keyoff_marker = true; } else { field_marker = false; keyoff_marker = 
false; } break; + case NIX_FLOW_KEY_TYPE_NVGRE: + field->lid = NPC_LID_LD; + field->hdr_offset = 4; /* VSID offset */ + field->bytesm1 = 2; + field->ltype_match = NPC_LT_LD_NVGRE; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_VXLAN: + case NIX_FLOW_KEY_TYPE_GENEVE: + field->lid = NPC_LID_LE; + field->bytesm1 = 2; + field->hdr_offset = 4; + field->ltype_mask = 0xF; + field_marker = false; + keyoff_marker = false; + + if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { + field->ltype_match |= NPC_LT_LE_VXLAN; + group_member = true; + } + + if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { + field->ltype_match |= NPC_LT_LE_GENEVE; + group_member = true; + } + + if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { + if (group_member) { + field->ltype_mask = ~field->ltype_match; + field_marker = true; + keyoff_marker = true; + valid_key = true; + group_member = false; + } + } + break; + case NIX_FLOW_KEY_TYPE_ETH_DMAC: + case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: + field->lid = NPC_LID_LA; + field->ltype_match = NPC_LT_LA_ETHER; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { + field->lid = NPC_LID_LF; + field->ltype_match = NPC_LT_LF_TU_ETHER; + } + field->hdr_offset = 0; + field->bytesm1 = 5; /* DMAC 6 Byte */ + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_IPV6_EXT: + field->lid = NPC_LID_LC; + field->hdr_offset = 40; /* IPV6 hdr */ + field->bytesm1 = 0; /* 1 Byte ext hdr*/ + field->ltype_match = NPC_LT_LC_IP6_EXT; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_GTPU: + field->lid = NPC_LID_LE; + field->hdr_offset = 4; + field->bytesm1 = 3; /* 4 bytes TID*/ + field->ltype_match = NPC_LT_LE_GTPU; + field->ltype_mask = 0xF; + break; } field->ena = 1; @@ -2259,6 +2895,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp) { + bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; @@ -2273,12 +2910,31 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + /* VF can't overwrite admin(PF) changes */ + if (from_vf && pfvf->pf_set_vfs_mac) + return -EPERM; + ether_addr_copy(pfvf->mac_addr, req->mac_addr); rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, req->mac_addr); - rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + return 0; +} + +int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, + struct msg_req *req, + struct nix_get_mac_addr_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + + if (!is_nixlf_attached(rvu, pcifunc)) + return NIX_AF_ERR_AF_LF_INVALID; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); return 0; } @@ -2313,9 +2969,6 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, else rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, allmulti); - - rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); - return 0; } @@ -2449,68 +3102,9 @@ linkcfg: cfg &= ~(0xFFFFFULL << 12); cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg); - - return 0; -} - -int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp) -{ - struct npc_mcam_alloc_entry_req alloc_req = { }; - struct npc_mcam_alloc_entry_rsp alloc_rsp = { }; - struct npc_mcam_free_entry_req free_req = { }; - u16 
pcifunc = req->hdr.pcifunc; - int blkaddr, nixlf, err; - struct rvu_pfvf *pfvf; - - /* LBK VFs do not have separate MCAM UCAST entry hence - * skip allocating rxvlan for them - */ - if (is_afvf(pcifunc)) - return 0; - - pfvf = rvu_get_pfvf(rvu, pcifunc); - if (pfvf->rxvlan) - return 0; - - /* alloc new mcam entry */ - alloc_req.hdr.pcifunc = pcifunc; - alloc_req.count = 1; - - err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, - &alloc_rsp); - if (err) - return err; - - /* update entry to enable rxvlan offload */ - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) { - err = NIX_AF_ERR_AF_LF_INVALID; - goto free_entry; - } - - nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) { - err = NIX_AF_ERR_AF_LF_INVALID; - goto free_entry; - } - - pfvf->rxvlan_index = alloc_rsp.entry_list[0]; - /* all it means is that rxvlan_index is valid */ - pfvf->rxvlan = true; - - err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); - if (err) - goto free_entry; + rvu_nix_update_link_credits(rvu, blkaddr, link, cfg); return 0; -free_entry: - free_req.hdr.pcifunc = pcifunc; - free_req.entry = alloc_rsp.entry_list[0]; - rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp); - pfvf->rxvlan = false; - return err; } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, @@ -2583,6 +3177,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) */ for (cgx = 0; cgx < hw->cgx; cgx++) { lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + /* Skip when cgx is not available or lmac cnt is zero */ + if (lmac_cnt <= 0) + continue; tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; /* Enable credits and set credit pkt count to max allowed */ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); @@ -2591,9 +3188,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); - rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_EXPR_CREDIT(link), - tx_credits); } } @@ -2605,8 +3199,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); - rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits); } } @@ -2674,6 +3266,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) /* Do not bypass NDC cache */ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); cfg &= ~0x3FFEULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of SQB aka SQEs */ + cfg |= 0x04ULL; +#endif rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); /* Result structure can be followed by RQ/SQ/CQ context at @@ -2704,14 +3300,6 @@ int rvu_nix_init(struct rvu *rvu) return 0; block = &hw->block[blkaddr]; - /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt - * internal state when conditional clocks are turned off. - * Hence enable them. 
- */ - if (is_rvu_9xxx_A0(rvu)) - rvu_write64(rvu, blkaddr, NIX_AF_CFG, - rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL); - /* Calibrate X2P bus to check if CGX/LBK links are fine */ err = nix_calibrate_x2p(rvu, blkaddr); if (err) @@ -2751,6 +3339,10 @@ int rvu_nix_init(struct rvu *rvu) if (err) return err; + err = nix_setup_txvlan(rvu, hw->nix0); + if (err) + return err; + /* Configure segmentation offload formats */ nix_setup_lso(rvu, hw->nix0, blkaddr); @@ -2763,23 +3355,23 @@ int rvu_nix_init(struct rvu *rvu) rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, - (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F); + (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, - (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F); + (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F); + (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F); + (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) | + (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) | 0x0F); err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); @@ -2788,6 +3380,24 @@ int rvu_nix_init(struct rvu *rvu) /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ nix_link_config(rvu, blkaddr); + + /* Enable Channel backpressure */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); + + err = rvu_nix_fixes_init(rvu, hw->nix0, blkaddr); + if (err) + return err; + + if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) { + /* Config IPSec headers identification */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(0), + (NPC_LID_LD << 8) | + (NPC_LT_LD_ESP << 4) | 0x0F); + + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(1), + (8 << 12) | (NPC_LID_LH << 8) | + (NPC_LT_LH_TU_ESP << 4) | 0x0F); + } } return 0; } @@ -2798,6 +3408,7 @@ void rvu_nix_freemem(struct rvu *rvu) struct rvu_block *block; struct nix_txsch *txsch; struct nix_mcast *mcast; + struct nix_txvlan *vlan; struct nix_hw *nix_hw; int blkaddr, lvl; @@ -2818,14 +3429,20 @@ void rvu_nix_freemem(struct rvu *rvu) kfree(txsch->schq.bmap); } + vlan = &nix_hw->txvlan; + kfree(vlan->rsrc.bmap); + mutex_destroy(&vlan->rsrc_lock); + devm_kfree(rvu->dev, vlan->entry2pfvf_map); + mcast = &nix_hw->mcast; qmem_free(rvu->dev, mcast->mce_ctx); qmem_free(rvu->dev, mcast->mcast_buf); mutex_destroy(&mcast->mce_lock); + rvu_nix_fixes_exit(rvu, nix_hw); } } -static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf) +int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct rvu_hwinfo *hw = rvu->hw; @@ -2853,7 +3470,10 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, return err; rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); - return 0; + + npc_mcam_enable_flows(rvu, pcifunc); + + return 
 
@@ -2798,6 +3408,7 @@ void rvu_nix_freemem(struct rvu *rvu)
 	struct rvu_block *block;
 	struct nix_txsch *txsch;
 	struct nix_mcast *mcast;
+	struct nix_txvlan *vlan;
 	struct nix_hw *nix_hw;
 	int blkaddr, lvl;
 
@@ -2818,14 +3429,20 @@ void rvu_nix_freemem(struct rvu *rvu)
 			kfree(txsch->schq.bmap);
 		}
 
+		vlan = &nix_hw->txvlan;
+		kfree(vlan->rsrc.bmap);
+		mutex_destroy(&vlan->rsrc_lock);
+		devm_kfree(rvu->dev, vlan->entry2pfvf_map);
+
 		mcast = &nix_hw->mcast;
 		qmem_free(rvu->dev, mcast->mce_ctx);
 		qmem_free(rvu->dev, mcast->mcast_buf);
 		mutex_destroy(&mcast->mce_lock);
+		rvu_nix_fixes_exit(rvu, nix_hw);
 	}
 }
 
-static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
 {
 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -2853,7 +3470,10 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
 		return err;
 
 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
-	return 0;
+
+	npc_mcam_enable_flows(rvu, pcifunc);
+
+	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
 }
 
 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
@@ -2867,22 +3487,30 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
 		return err;
 
 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
-	return 0;
+
+	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
 }
 
 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
 {
 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 	struct hwctx_disable_req ctx_req;
+	int pf = rvu_get_pf(pcifunc);
+	u8 cgx_id, lmac_id;
+	void *cgxd;
 	int err;
 
 	ctx_req.hdr.pcifunc = pcifunc;
 
 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
 	nix_interface_deinit(rvu, pcifunc, nixlf);
 	nix_rx_sync(rvu, blkaddr);
 	nix_txschq_free(rvu, pcifunc);
 
+	rvu_cgx_start_stop_io(rvu, pcifunc, false);
+
 	if (pfvf->sq_ctx) {
 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
@@ -2904,7 +3532,80 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
 			dev_err(rvu->dev, "CQ ctx disable failed\n");
 	}
 
+	/* Disabling CGX and NPC config done for PTP */
+	if (pfvf->hw_rx_tstamp_en) {
+		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+		cgxd = rvu_cgx_pdata(cgx_id, rvu);
+		cgx_lmac_ptp_config(cgxd, lmac_id, false);
+		/* Undo NPC config done for PTP */
+		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+			dev_err(rvu->dev, "NPC config for PTP failed\n");
+		pfvf->hw_rx_tstamp_en = false;
+	}
+
 	nix_ctx_free(rvu, pfvf);
+
+	if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
+		/* reset the configuration related to inline ipsec */
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(nixlf),
+			    0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf),
+			    0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf),
+			    0x0);
+	}
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+					  struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	int blkaddr;
+	int nixlf;
+	u64 cfg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+	cfg |= BIT_ULL(32);
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+					   struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	int blkaddr;
+	int nixlf;
+	u64 cfg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+	cfg &= ~BIT_ULL(32);
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+	return 0;
 }
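The two PTP handlers above are mirror images: both read-modify-write NIX_AF_LFX_TX_CFG and differ only in whether bit 32 (TX timestamping) is set or cleared. A minimal sketch of a hypothetical shared helper, not part of this patch, built from the same in-file calls:

	/* Hypothetical common helper: toggle TX timestamping (bit 32 of
	 * NIX_AF_LFX_TX_CFG) for the NIX LF owned by pcifunc.
	 */
	static int nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
	{
		int blkaddr, nixlf;
		u64 cfg;

		blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
		if (blkaddr < 0)
			return NIX_AF_ERR_AF_LF_INVALID;

		nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
		if (nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
		if (enable)
			cfg |= BIT_ULL(32);
		else
			cfg &= ~BIT_ULL(32);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

		return 0;
	}

Each mbox handler would then reduce to a one-line call with enable set to true or false.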
 
 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
@@ -2957,3 +3658,326 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
 
 	return 0;
 }
+
+int rvu_mbox_handler_nix_set_vlan_tpid(struct rvu *rvu,
+				       struct nix_set_vlan_tpid *req,
+				       struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	int nixlf, err, blkaddr;
+	u64 cfg;
+
+	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+	if (err)
+		return err;
+
+	if (req->vlan_type != NIX_VLAN_TYPE_OUTER &&
+	    req->vlan_type != NIX_VLAN_TYPE_INNER)
+		return NIX_AF_ERR_PARAM;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+	if (req->vlan_type == NIX_VLAN_TYPE_OUTER)
+		cfg = (cfg & ~GENMASK_ULL(15, 0)) | req->tpid;
+	else
+		cfg = (cfg & ~GENMASK_ULL(31, 16)) | ((u64)req->tpid << 16);
+
+	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+	return 0;
+}
+
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	int blkaddr;
+	u64 intr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+	if (blkaddr < 0)
+		return IRQ_NONE;
+
+	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+
+	if (intr & BIT_ULL(0))
+		dev_err(rvu->dev, "NIX: Unmapped slot error\n");
+
+	/* Clear interrupts */
+	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_nix_af_err_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	int blkaddr;
+	u64 intr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+	if (blkaddr < 0)
+		return IRQ_NONE;
+
+	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+
+	if (intr & BIT_ULL(14))
+		dev_err(rvu->dev, "NIX: Memory fault on NIX_AQ_INST_S read\n");
+
+	if (intr & BIT_ULL(13))
+		dev_err(rvu->dev, "NIX: Memory fault on NIX_AQ_RES_S write\n");
+
+	if (intr & BIT_ULL(12))
+		dev_err(rvu->dev, "NIX: AQ doorbell error\n");
+
+	if (intr & BIT_ULL(6))
+		dev_err(rvu->dev, "NIX: Rx on unmapped PF_FUNC\n");
+
+	if (intr & BIT_ULL(5))
+		dev_err(rvu->dev, "NIX: Rx multicast replication error\n");
+
+	if (intr & BIT_ULL(4))
+		dev_err(rvu->dev, "NIX: Memory fault on NIX_RX_MCE_S read\n");
+
+	if (intr & BIT_ULL(3))
+		dev_err(rvu->dev, "NIX: Memory fault on multicast WQE read\n");
+
+	if (intr & BIT_ULL(2))
+		dev_err(rvu->dev, "NIX: Memory fault on mirror WQE read\n");
+
+	if (intr & BIT_ULL(1))
+		dev_err(rvu->dev, "NIX: Memory fault on mirror pkt write\n");
+
+	if (intr & BIT_ULL(0))
+		dev_err(rvu->dev, "NIX: Memory fault on multicast pkt write\n");
+
+	/* Clear interrupts */
+	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_nix_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	int blkaddr;
+	u64 intr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+	if (blkaddr < 0)
+		return IRQ_NONE;
+
+	intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+
+	if (intr & BIT_ULL(34))
+		dev_err(rvu->dev, "NIX: Poisoned data on NIX_AQ_INST_S read\n");
+
+	if (intr & BIT_ULL(33))
+		dev_err(rvu->dev, "NIX: Poisoned data on NIX_AQ_RES_S write\n");
+
+	if (intr & BIT_ULL(32))
+		dev_err(rvu->dev, "NIX: Poisoned data on HW context read\n");
+
+	if (intr & BIT_ULL(4))
+		dev_err(rvu->dev, "NIX: Poisoned data on packet read from mirror buffer\n");
+
+	if (intr & BIT_ULL(3))
+		dev_err(rvu->dev, "NIX: Poisoned data on packet read from multicast buffer\n");
+
+	if (intr & BIT_ULL(2))
+		dev_err(rvu->dev, "NIX: Poisoned data on WQE read from mirror buffer\n");
+
+	if (intr & BIT_ULL(1))
+		dev_err(rvu->dev, "NIX: Poisoned data on WQE read from multicast buffer\n");
+
+	if (intr & BIT_ULL(0))
+		dev_err(rvu->dev, "NIX: Poisoned data on NIX_RX_MCE_S read\n");
+
+	/* Clear interrupts */
+	rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+	return IRQ_HANDLED;
+}
+
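The bit-by-bit if chains in the three handlers above could also be written table-driven, which keeps the bit-to-message mapping in one place. A sketch under that assumption; the struct and function names are invented for illustration:

	struct nix_af_intr_desc {
		int bit;
		const char *msg;
	};

	static const struct nix_af_intr_desc nix_af_err_descs[] = {
		{ 14, "Memory fault on NIX_AQ_INST_S read" },
		{ 13, "Memory fault on NIX_AQ_RES_S write" },
		{ 12, "AQ doorbell error" },
		{  6, "Rx on unmapped PF_FUNC" },
		{  5, "Rx multicast replication error" },
		{  4, "Memory fault on NIX_RX_MCE_S read" },
		{  3, "Memory fault on multicast WQE read" },
		{  2, "Memory fault on mirror WQE read" },
		{  1, "Memory fault on mirror pkt write" },
		{  0, "Memory fault on multicast pkt write" },
	};

	/* Log every asserted bit of an interrupt summary register */
	static void nix_af_log_intr(struct rvu *rvu, u64 intr,
				    const struct nix_af_intr_desc *desc,
				    int cnt)
	{
		int i;

		for (i = 0; i < cnt; i++)
			if (intr & BIT_ULL(desc[i].bit))
				dev_err(rvu->dev, "NIX: %s\n", desc[i].msg);
	}

The open-coded form in the patch matches the prevailing style of this driver; the table is shown only as a design alternative.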
+static bool rvu_nix_af_request_irq(struct rvu *rvu, int blkaddr, int offset,
+				   const char *name, irq_handler_t fn)
+{
+	int rc;
+
+	WARN_ON(rvu->irq_allocated[offset]);
+	rvu->irq_allocated[offset] = false;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+	rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+			 &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (rc)
+		dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+	else
+		rvu->irq_allocated[offset] = true;
+
+	return rvu->irq_allocated[offset];
+}
+
+int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+	int blkaddr, base;
+	bool rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+	if (blkaddr < 0)
+		return blkaddr;
+
+	/* Get NIX AF MSIX vectors offset. */
+	base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+	if (!base) {
+		dev_warn(rvu->dev,
+			 "Failed to get NIX_AF_INT vector offsets\n");
+		return 0;
+	}
+
+	/* Register and enable NIX_AF_RVU_INT interrupt */
+	rc = rvu_nix_af_request_irq(rvu, blkaddr, base + NIX_AF_INT_VEC_RVU,
+				    "NIX_AF_RVU_INT",
+				    rvu_nix_af_rvu_intr_handler);
+	if (!rc)
+		goto err;
+	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+	/* Register and enable NIX_AF_ERR_INT interrupt */
+	rc = rvu_nix_af_request_irq(rvu, blkaddr, base + NIX_AF_INT_VEC_AF_ERR,
+				    "NIX_AF_ERR_INT",
+				    rvu_nix_af_err_intr_handler);
+	if (!rc)
+		goto err;
+	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+	/* Register and enable NIX_AF_RAS interrupt */
+	rc = rvu_nix_af_request_irq(rvu, blkaddr, base + NIX_AF_INT_VEC_POISON,
+				    "NIX_AF_RAS",
+				    rvu_nix_af_ras_intr_handler);
+	if (!rc)
+		goto err;
+	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+	return 0;
+err:
+	rvu_nix_unregister_interrupts(rvu);
+	return rc;
+}
+
+void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+	int blkaddr, offs, i;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+	if (blkaddr < 0)
+		return;
+
+	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+	if (!offs)
+		return;
+
+	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+			 rvu);
+		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+	}
+
+	for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+		if (rvu->irq_allocated[offs + i]) {
+			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu);
+			rvu->irq_allocated[offs + i] = false;
+		}
+}
+
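One detail worth noting in rvu_nix_af_request_irq() above: sprintf() receives name in the format-string position. Every caller passes a literal string without '%', so it behaves as intended here, but the defensive idiom bounds the copy and treats the name purely as data:

	/* defensive variant: bounded copy, 'name' treated as data */
	snprintf(&rvu->irq_name[offset * NAME_SIZE], NAME_SIZE, "%s", name);

This is an observation on the pattern only; the patch keeps the sprintf() form.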
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+					  struct nix_inline_ipsec_cfg *req,
+					  struct msg_rsp *rsp)
+{
+	int blkaddr;
+	u64 val;
+
+	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+		return 0;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	if (req->enable) {
+		/* Set OPCODE and EGRP */
+		val = (u64)req->gen_cfg.egrp << 48 |
+		      (u64)req->gen_cfg.opcode << 32;
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+		/* Set CPT queue for inline IPSec */
+		val = (u64)req->inst_qsel.cpt_pf_func << 8 |
+		      req->inst_qsel.cpt_slot;
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(0), val);
+
+		/* Set CPT credit */
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(0),
+			    req->cpt_credit);
+	} else {
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(0), 0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(0),
+			    0x3FFFFF);
+	}
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+					     struct nix_inline_ipsec_lf_cfg *req,
+					     struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	int lf, blkaddr;
+	u64 val;
+
+	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+		return 0;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	block = &hw->block[blkaddr];
+	lf = rvu_get_lf(rvu, block, pcifunc, 0);
+	if (lf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	if (req->enable) {
+		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+		val = (u64)req->ipsec_cfg0.tt << 44 |
+		      (u64)req->ipsec_cfg0.tag_const << 20 |
+		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+		      req->ipsec_cfg0.lenm1_max;
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+		/* Set SA_IDX_W and SA_IDX_MAX */
+		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+		      req->ipsec_cfg1.sa_idx_max;
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+		/* Set SA base address */
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+			    req->sa_base_addr);
+	} else {
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+			    0x0);
+	}
+
+	return 0;
+}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+	/* overwrite vf mac address with default_mac */
+	if (from_vf)
+		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
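For readability, the register layout implied by the shifts in rvu_mbox_handler_nix_inline_ipsec_lf_cfg() can be restated with FIELD_PREP. The field widths below are inferred from the shift positions alone and are assumptions, not taken from the hardware manual:

	#include <linux/bitfield.h>

	/* assumed layout of NIX_AF_LFX_RX_IPSEC_CFG0, from the shifts above */
	#define IPSEC_CFG0_TT		GENMASK_ULL(45, 44)
	#define IPSEC_CFG0_TAG_CONST	GENMASK_ULL(43, 20)
	#define IPSEC_CFG0_SA_POW2_SIZE	GENMASK_ULL(19, 16)
	#define IPSEC_CFG0_LENM1_MAX	GENMASK_ULL(15, 0)

	static u64 nix_ipsec_cfg0_pack(const struct nix_inline_ipsec_lf_cfg *req)
	{
		return FIELD_PREP(IPSEC_CFG0_TT, req->ipsec_cfg0.tt) |
		       FIELD_PREP(IPSEC_CFG0_TAG_CONST,
				  req->ipsec_cfg0.tag_const) |
		       FIELD_PREP(IPSEC_CFG0_SA_POW2_SIZE,
				  req->ipsec_cfg0.sa_pow2_size) |
		       FIELD_PREP(IPSEC_CFG0_LENM1_MAX,
				  req->ipsec_cfg0.lenm1_max);
	}

Packed this way the value matches the open-coded shifts, while the mask names document each field's position.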