Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 64 +++++++---------
1 file changed, 27 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 9513381d7e5f..6864fa315900 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -34,12 +34,6 @@ static inline struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
return cqe_hdr;
}
-/* Flush SQE written to LMT to SQB */
-static inline u64 otx2_lmt_flush(uint64_t addr)
-{
- return atomic64_fetch_xor_relaxed(0, (atomic64_t *)addr);
-}
-
static inline unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
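The helper removed above is the LMTST flush primitive: an atomic fetch-XOR of zero on the LMT address is a read-modify-write that leaves the data untouched while forcing the SQE staged in the LMT region out to the SQB. Presumably it is relocated to a shared header so other files can use it; a minimal sketch of the relocated helper, reusing the removed body (the new location is an assumption, the hunk only shows the removal):

	/* Flush SQE written to LMT to SQB; XOR with 0 preserves the
	 * data while the atomic access forces the write-out.
	 */
	static inline u64 otx2_lmt_flush(uint64_t addr)
	{
		return atomic64_fetch_xor_relaxed(0, (atomic64_t *)addr);
	}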
@@ -67,8 +61,8 @@ static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
offset = frag->page_offset;
*len = skb_frag_size(frag);
}
- return dma_map_page_attrs(pfvf->dev, page, offset, *len,
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ return otx2_dma_map_page(pfvf, page, offset, *len,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
@@ -76,9 +70,9 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
int seg;
for (seg = 0; seg < sg->num_segs; seg++) {
- dma_unmap_page_attrs(pfvf->dev, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
sg->num_segs = 0;
}
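This hunk and the DMA hunks below replace direct dma_map_page_attrs()/dma_unmap_page_attrs() calls with driver wrappers whose bodies are not part of this diff. A minimal sketch, assuming they are thin pass-throughs (the bodies, and any extra error handling they may add, are assumptions):

	static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
						   struct page *page,
						   size_t offset, size_t size,
						   enum dma_data_direction dir,
						   unsigned long attrs)
	{
		/* A single central call site makes it easy to add
		 * dma_mapping_error() checks or IOMMU quirks later.
		 */
		return dma_map_page_attrs(pfvf->dev, page, offset, size,
					  dir, attrs);
	}

	static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
					       dma_addr_t addr, size_t size,
					       enum dma_data_direction dir,
					       unsigned long attrs)
	{
		dma_unmap_page_attrs(pfvf->dev, addr, size, dir, attrs);
	}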
@@ -95,7 +89,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
snd_comp = (struct nix_send_comp_s *)
((void *)cqe_hdr + sizeof(*cqe_hdr));
- if (snd_comp->status) {
+ if (unlikely(snd_comp->status)) {
/* tx packet error handling */
if (netif_msg_tx_err(pfvf)) {
netdev_info(pfvf->netdev,
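Most of the remaining hunks only add branch-prediction annotations. For reference, likely()/unlikely() are the standard include/linux/compiler.h wrappers around GCC's __builtin_expect():

	/* The hint lets the compiler lay out the expected path as the
	 * fall-through, keeping error handling off the hot path. The
	 * !! normalizes any truthy value to 0 or 1.
	 */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)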
@@ -109,7 +103,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg = &sq->sg[snd_comp->sqe_id];
skb = (struct sk_buff *)sg->skb;
- if (!skb)
+ if (unlikely(!skb))
return;
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
@@ -204,7 +198,7 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
- if (!skb_shinfo(skb)->nr_frags) {
+ if (likely(!skb_shinfo(skb)->nr_frags)) {
/* Check if data starts at some nonzero offset
* from the start of the buffer. For now the
* only possible offset is 8 bytes in the case
@@ -221,8 +215,8 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
va - page_address(page) + off, len - off, RCV_FRAG_LEN);
- dma_unmap_page_attrs(pfvf->dev, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
@@ -319,7 +313,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
/* CQE_HDR_S for a Rx pkt is always followed by RX_PARSE_S */
parse = (struct nix_rx_parse_s *)((void *)cqe_hdr + sizeof(*cqe_hdr));
- if (parse->errlev || parse->errcode) {
+ if (unlikely(parse->errlev || parse->errcode)) {
if (otx2_check_rcv_errors(pfvf, parse, cq->cq_idx))
return;
}
@@ -328,7 +322,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
end = start + ((parse->desc_sizem1 + 1) * 16);
skb = napi_get_frags(napi);
- if (!skb)
+ if (unlikely(!skb))
return;
/* Run through each NIX_RX_SG_S subdc and frame the skb */
@@ -376,9 +370,9 @@ static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf,
/* Make sure HW writes to CQ are done */
dma_rmb();
- while (processed_cqe < budget) {
+ while (likely(processed_cqe < budget)) {
cqe_hdr = otx2_get_next_cqe(cq);
- if (!cqe_hdr) {
+ if (unlikely(!cqe_hdr)) {
if (!processed_cqe)
return 0;
break;
@@ -393,13 +387,13 @@ static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (!cq->pool_ptrs)
+ if (unlikely(!cq->pool_ptrs))
return 0;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_ATOMIC);
- if (bufptr <= 0) {
+ if (unlikely(bufptr <= 0)) {
struct refill_work *work;
struct delayed_work *dwork;
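The hunk is truncated here, but the two declarations show the recovery path: refilling from NAPI context must use GFP_ATOMIC, and when that fails under memory pressure the retry is punted to a workqueue, where GFP_KERNEL allocations may sleep. A sketch of the deferral (the field names and delay are assumptions; only the declarations appear in the diff):

	work = &pfvf->refill_wrk[cq->cq_idx];
	dwork = &work->pool_refill_work;
	/* Retry the pool refill later, in process context */
	schedule_delayed_work(dwork, msecs_to_jiffies(100));
	break;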
@@ -434,9 +428,9 @@ static inline int otx2_tx_napi_handler(struct otx2_nic *pfvf,
/* Make sure HW writes to CQ are done */
dma_rmb();
- while (processed_cqe < budget) {
+ while (likely(processed_cqe < budget)) {
cqe_hdr = otx2_get_next_cqe(cq);
- if (!cqe_hdr) {
+ if (unlikely(!cqe_hdr)) {
if (!processed_cqe)
return 0;
break;
@@ -452,7 +446,7 @@ static inline int otx2_tx_napi_handler(struct otx2_nic *pfvf,
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (tx_pkts) {
+ if (likely(tx_pkts)) {
txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
/* Check if queue was stopped earlier due to ring full */
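netdev_tx_completed_queue() is the completion half of byte queue limits (BQL). It only does its job if the xmit path (outside this diff) reports bytes as they are queued; the pairing looks like:

	/* In the ndo_start_xmit handler: account bytes queued */
	netdev_tx_sent_queue(txq, skb->len);

	/* On Tx CQE processing, as above: account bytes completed,
	 * letting the stack bound the bytes in flight per Tx queue.
	 */
	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);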
@@ -476,9 +470,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = 0; i < CQS_PER_CINT; i++) {
+ for (i = CQS_PER_CINT - 1; i >= 0; i--) {
cq_idx = cq_poll->cq_ids[i];
- if (cq_idx == CINT_INVALID_CQ)
+ if (unlikely(cq_idx == CINT_INVALID_CQ))
continue;
cq = &qset->cq[cq_idx];
if (cq->cq_type == CQ_RX) {
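The CQ scan order within a completion interrupt is reversed here. The diff does not state why; if Tx CQs sit at the higher indices, this services Tx completions (freeing SQEs) before picking up new Rx work, which is a plausible motivation, not a stated one. One detail the countdown form depends on:

	/* i must be a signed int: with an unsigned index, i >= 0 is
	 * always true and the countdown loop would never terminate.
	 */
	int i;

	for (i = CQS_PER_CINT - 1; i >= 0; i--)
		service_cq(cq_poll->cq_ids[i]);	/* hypothetical helper */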
@@ -911,14 +905,14 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (free_sqe < sq->sqe_thresh ||
free_sqe < otx2_get_sqe_count(pfvf, skb))
- goto fail;
+ return false;
num_segs = skb_shinfo(skb)->nr_frags + 1;
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
- if (num_segs > OTX2_MAX_FRAGS_IN_SQE) {
+ if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
return true;
@@ -961,10 +955,6 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
otx2_sqe_flush(sq, offset);
return true;
-fail:
- netdev_warn(pfvf->netdev, "SQ%d full, SQB count %d Aura count %lld\n",
- qidx, sq->num_sqbs, *sq->aura_fc_addr);
- return false;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
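With the fail label gone (see the hunk above), a full SQ no longer logs a per-packet warning from the hot path; otx2_sq_append_skb() simply returns false and leaves the reaction to the caller. A sketch of the caller side (the xmit handler is not in this diff, so names and flow are assumptions):

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		/* Ring full: stop the queue and ask the stack to retry */
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}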
@@ -993,10 +983,10 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
*iova -= OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain,
*iova);
- dma_unmap_page_attrs(pfvf->dev, *iova,
- RCV_FRAG_LEN,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, *iova,
+ RCV_FRAG_LEN,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
put_page(virt_to_page(phys_to_virt(pa)));
iova++;
}