Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic')
 drivers/net/ethernet/marvell/octeontx2/nic/Makefile       |   13
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c  | 1488
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h  |  761
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 1565
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c   |  613
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c      | 2548
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c     |  212
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h     |   20
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h     |  160
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c   |  285
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h  |  425
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c    | 1058
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h    |  156
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c      |  702
 14 files changed, 10006 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile new file mode 100644 index 000000000000..48846096f4c7 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell's OcteonTX2 ethernet device drivers +# + +obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o +obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o + +octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ + otx2_ptp.o otx2_flows.o +octeontx2_nicvf-y := otx2_vf.o otx2_smqvf.o + +ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c new file mode 100644 index 000000000000..ab6bd745b881 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -0,0 +1,1488 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/ethtool.h> +#include <net/tso.h> + +#include "otx2_reg.h" +#include "otx2_common.h" +#include "otx2_struct.h" + +static inline void otx2_nix_rq_op_stats(struct queue_stats *stats, + struct otx2_nic *pfvf, int qidx); +static inline void otx2_nix_sq_op_stats(struct queue_stats *stats, + struct otx2_nic *pfvf, int qidx); + +void otx2_update_lmac_stats(struct otx2_nic *pfvf) +{ + struct msg_req *req; + + if (!netif_running(pfvf->netdev)) + return; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return; + } + + otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); +} + +void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf) +{ + struct msg_req *req; + + if (!netif_running(pfvf->netdev)) + return; + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return; + } + otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); +} + +int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) +{ + struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; + + if (!pfvf->qset.rq) + return 0; + + otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); + return 1; +} + +int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) +{ + struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; + + if (!pfvf->qset.sq) + return 0; + + otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); + return 1; +} + +void otx2_get_dev_stats(struct otx2_nic *pfvf) +{ + struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; + +#define OTX2_GET_RX_STATS(reg) \ + otx2_read64(pfvf, NIX_LF_RX_STATX(reg)) +#define OTX2_GET_TX_STATS(reg) \ + otx2_read64(pfvf, NIX_LF_TX_STATX(reg)) + + dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); + dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP); + dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST); + dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST); + dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST); + dev_stats->rx_frames = dev_stats->rx_bcast_frames + + dev_stats->rx_mcast_frames + + dev_stats->rx_ucast_frames; + + dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); + 
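/* As on the RX side above, tx_frames is accumulated from the per-type + * (ucast/bcast/mcast) counters; the NIX LF stats provide no aggregate + * frame count. + */ +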
dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP); + dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST); + dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST); + dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST); + dev_stats->tx_frames = dev_stats->tx_bcast_frames + + dev_stats->tx_mcast_frames + + dev_stats->tx_ucast_frames; +} + +void otx2_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; + + otx2_get_dev_stats(pfvf); + + stats->rx_bytes = dev_stats->rx_bytes; + stats->rx_packets = dev_stats->rx_frames; + stats->rx_dropped = dev_stats->rx_drops; + stats->multicast = dev_stats->rx_mcast_frames; + + stats->tx_bytes = dev_stats->tx_bytes; + stats->tx_packets = dev_stats->tx_frames; + stats->tx_dropped = dev_stats->tx_drops; +} +EXPORT_SYMBOL(otx2_get_stats64); + +/* Sync MAC address with RVU */ +int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac) +{ + struct nix_set_mac_addr *req; + int err; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, + struct net_device *netdev) +{ + struct nix_get_mac_addr_rsp *rsp; + struct mbox_msghdr *msghdr; + struct msg_req *req; + int err; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + otx2_mbox_unlock(&pfvf->mbox); + return err; + } + + msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (!msghdr) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + rsp = (struct nix_get_mac_addr_rsp *)msghdr; + ether_addr_copy(netdev->dev_addr, rsp->mac_addr); + otx2_mbox_unlock(&pfvf->mbox); + + return 0; +} + +int otx2_set_mac_address(struct net_device *netdev, void *p) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + /* update dmac field in vlan offload rule */ + if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) + otx2_install_rxvlan_offload_flow(pfvf); + } else { + return -EPERM; + } + + return 0; +} +EXPORT_SYMBOL(otx2_set_mac_address); + +int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) +{ + struct nix_frs_cfg *req; + int err; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + req->update_smq = true; + /* Add EDSA/HIGIG2 header len to maxlen */ + pfvf->max_frs = mtu + OTX2_ETH_HLEN + pfvf->addl_mtu; + req->maxlen = pfvf->max_frs; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct nix_rss_flowkey_cfg *req; + int err; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + req->mcam_index = -1; /* Default or reserved index */ + req->flowkey_cfg = 
rss->flowkey_cfg; + req->group = DEFAULT_RSS_CONTEXT_GROUP; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +int otx2_set_rss_table(struct otx2_nic *pfvf) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct mbox *mbox = &pfvf->mbox; + struct nix_aq_enq_req *aq; + int idx, err; + + otx2_mbox_lock(mbox); + /* Get memory to put this msg */ + for (idx = 0; idx < rss->rss_size; idx++) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + /* The shared memory buffer can be full. + * Flush it and retry + */ + err = otx2_sync_mbox_msg(mbox); + if (err) { + otx2_mbox_unlock(mbox); + return err; + } + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + otx2_mbox_unlock(mbox); + return -ENOMEM; + } + } + + aq->rss.rq = rss->ind_tbl[idx]; + + /* Fill AQ info */ + aq->qidx = idx; + aq->ctype = NIX_AQ_CTYPE_RSS; + aq->op = NIX_AQ_INSTOP_INIT; + } + err = otx2_sync_mbox_msg(mbox); + otx2_mbox_unlock(mbox); + return err; +} + +void otx2_set_rss_key(struct otx2_nic *pfvf) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + u64 *key = (u64 *)&rss->key[4]; + int idx; + + /* 352bit or 44byte key needs to be configured as below + * NIX_LF_RX_SECRETX0 = key<351:288> + * NIX_LF_RX_SECRETX1 = key<287:224> + * NIX_LF_RX_SECRETX2 = key<223:160> + * NIX_LF_RX_SECRETX3 = key<159:96> + * NIX_LF_RX_SECRETX4 = key<95:32> + * NIX_LF_RX_SECRETX5<63:32> = key<31:0> + */ + otx2_write64(pfvf, NIX_LF_RX_SECRETX(5), + (u64)(*((u32 *)&rss->key)) << 32); + idx = sizeof(rss->key) / sizeof(u64); + while (idx > 0) { + idx--; + otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++); + } +} + +int otx2_rss_init(struct otx2_nic *pfvf) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + int idx, ret = 0; + + /* Enable RSS */ + rss->enable = true; + rss->rss_size = sizeof(rss->ind_tbl); + + /* Init RSS key here */ + netdev_rss_key_fill(rss->key, sizeof(rss->key)); + otx2_set_rss_key(pfvf); + + /* Default indirection table */ + for (idx = 0; idx < rss->rss_size; idx++) + rss->ind_tbl[idx] = + ethtool_rxfh_indir_default(idx, pfvf->hw.rx_queues); + + ret = otx2_set_rss_table(pfvf); + if (ret) + return ret; + + /* Default flowkey or hash config to be used for generating flow tag */ + rss->flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 | + NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP | + NIX_FLOW_KEY_TYPE_SCTP; + + return otx2_set_flowkey_cfg(pfvf); +} + +void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) +{ + /* Configure CQE interrupt coalescing parameters + * + * HW triggers an irq when ECOUNT > cq_ecount_wait, hence + * set 1 less than cq_ecount_wait. And cq_time_wait is in + * usecs, convert that to 100ns count. 
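+ * E.g. a cq_time_wait of 50 usecs is programmed as 50 * 10 = 500 + * (in 100ns units) into NIX_LF_CINTX_WAIT<63:48>, alongside + * cq_qcount_wait in <47:32> and (cq_ecount_wait - 1) in the low bits.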
+ */ + otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx), + ((u64)(pfvf->hw.cq_time_wait * 10) << 48) | + ((u64)pfvf->hw.cq_qcount_wait << 32) | + (pfvf->hw.cq_ecount_wait - 1)); +} + +dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, + gfp_t gfp) +{ + dma_addr_t iova; + + /* Check if request can be accommodated in previous allocated page */ + if (pool->page && + ((pool->page_offset + pool->rbsize) <= PAGE_SIZE)) { + pool->pageref++; + goto ret; + } + + otx2_get_page(pool); + + /* Allocate a new page */ + pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0); + if (unlikely(!pool->page)) + return -ENOMEM; + + pool->page_offset = 0; +ret: + iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset, + pool->rbsize, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + if (!iova) { + if (!pool->page_offset) + __free_pages(pool->page, 0); + pool->page = NULL; + return -ENOMEM; + } + pool->page_offset += pool->rbsize; + return iova; +} + +void otx2_tx_timeout(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + schedule_work(&pfvf->reset_task); +} +EXPORT_SYMBOL(otx2_tx_timeout); + +void otx2_get_mac_from_af(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + int err; + + err = otx2_hw_get_mac_addr(pfvf, netdev); + if (err) + dev_warn(pfvf->dev, "Failed to read mac from hardware\n"); + + /* Normally AF should provide mac addresses for both PFs and CGX mapped + * VFs which means random mac gets generated either in case of error + * or LBK netdev. + */ + if (!is_valid_ether_addr(netdev->dev_addr)) + eth_hw_addr_random(netdev); +} +EXPORT_SYMBOL(otx2_get_mac_from_af); + +static int otx2_get_link(struct otx2_nic *pfvf) +{ + int link = 0; + u16 map; + + /* cgx lmac link */ + if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) { + map = pfvf->hw.tx_chan_base & 0x7FF; + link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF); + } + /* LBK channel */ + if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) + link = 12; + + return link; +} + +int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) +{ + struct nix_txschq_config *req; + struct otx2_hw *hw = &pfvf->hw; + u64 schq, parent; + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); + if (!req) + return -ENOMEM; + + req->lvl = lvl; + req->num_regs = 1; + + schq = hw->txschq_list[lvl][0]; + /* Set topology e.t.c configuration */ + if (lvl == NIX_TXSCH_LVL_SMQ) { + /* Set min and max Tx packet lengths */ + req->reg[0] = NIX_AF_SMQX_CFG(schq); + req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) | + OTX2_MIN_MTU; + + req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39); + req->num_regs++; + /* MDQ config */ + parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; + req->reg[1] = NIX_AF_MDQX_PARENT(schq); + req->regval[1] = parent << 16; + req->num_regs++; + /* Set DWRR quantum */ + req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); + req->regval[2] = DFLT_RR_QTM; + } else if (lvl == NIX_TXSCH_LVL_TL4) { + parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0]; + req->reg[0] = NIX_AF_TL4X_PARENT(schq); + req->regval[0] = parent << 16; + req->num_regs++; + req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); + req->regval[1] = DFLT_RR_QTM; + } else if (lvl == NIX_TXSCH_LVL_TL3) { + parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0]; + req->reg[0] = NIX_AF_TL3X_PARENT(schq); + req->regval[0] = parent << 16; + req->num_regs++; + req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); + req->regval[1] = DFLT_RR_QTM; + } else if (lvl == NIX_TXSCH_LVL_TL2) { + parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0]; + req->reg[0] = 
NIX_AF_TL2X_PARENT(schq); + req->regval[0] = parent << 16; + + req->num_regs++; + req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); + req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM; + + req->num_regs++; + req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, + otx2_get_link(pfvf)); + /* Enable this queue and backpressure */ + req->regval[2] = BIT_ULL(13) | BIT_ULL(12); + + } else if (lvl == NIX_TXSCH_LVL_TL1) { + /* Default config for TL1. + * For VF this is always ignored. + */ + + /* Set DWRR quantum */ + req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); + req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; + + req->num_regs++; + req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); + req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); + + req->num_regs++; + req->reg[2] = NIX_AF_TL1X_CIR(schq); + req->regval[2] = 0; + } + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +int otx2_txsch_alloc(struct otx2_nic *pfvf) +{ + struct nix_txsch_alloc_req *req; + int lvl, err; + + /* Get memory to put this msg */ + req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); + if (!req) + return -ENOMEM; + + /* Request one schq per level */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) + req->schq[lvl] = 1; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + return 0; +} + +int otx2_txschq_stop(struct otx2_nic *pfvf) +{ + struct nix_txsch_free_req *free_req; + int lvl, schq, err; + + otx2_mbox_lock(&pfvf->mbox); + /* Free the transmit schedulers */ + free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); + if (!free_req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + free_req->flags = TXSCHQ_FREE_ALL; + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + + /* Clear the txschq list */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) + pfvf->hw.txschq_list[lvl][schq] = 0; + } + return err; +} + +/* RED and drop levels of CQ on packet reception. + * For CQ level is measure of emptiness ( 0x0 = full, 255 = empty). + */ +#define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize)) +#define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize)) + +/* RED and drop levels of AURA for packet reception. + * For AURA level is measure of fullness (0x0 = empty, 255 = full). + * Eg: For RQ length 1K, for pass/drop level 204/230. + * RED accepts pkts if free pointers > 102 & <= 205. + * Drops pkts if free pointers < 102. + */ +#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */ +#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */ + +/* Send skid of 2000 packets required for CQ size of 4K CQEs. 
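+ * This skid value feeds the cq_limit computation in otx2_sq_init() below.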
*/ +#define SEND_CQ_SKID 2000 + +static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) +{ + struct otx2_qset *qset = &pfvf->qset; + struct nix_aq_enq_req *aq; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->rq.cq = qidx; + aq->rq.ena = 1; + aq->rq.pb_caching = 1; + aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */ + aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN / 8) - 1; + aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */ + aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */ + aq->rq.qint_idx = 0; + aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */ + aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */ + aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA; + aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA; + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) +{ + struct otx2_qset *qset = &pfvf->qset; + struct otx2_snd_queue *sq; + struct nix_aq_enq_req *aq; + struct otx2_pool *pool; + int err; + + pool = &pfvf->qset.pool[sqb_aura]; + sq = &qset->sq[qidx]; + sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128; + sq->sqe_cnt = qset->sqe_cnt; + + err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size); + if (err) + return err; + + err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, + TSO_HEADER_SIZE); + if (err) + return err; + + sq->sqe_base = sq->sqe->base; + sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); + if (!sq->sg) + return -ENOMEM; + + if (pfvf->ptp) { + err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, + sizeof(*sq->timestamps)); + if (err) + return err; + } + + sq->head = 0; + sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; + sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb; + /* Set SQE threshold to 10% of total SQEs */ + sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100; + sq->aura_id = sqb_aura; + sq->aura_fc_addr = pool->fc_addr->base; + sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); + sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0)); + + sq->stats.bytes = 0; + sq->stats.pkts = 0; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->sq.cq = pfvf->hw.rx_queues + qidx; + aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ + aq->sq.cq_ena = 1; + aq->sq.ena = 1; + /* Only one SMQ is allocated, map all SQ's to that SMQ */ + aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; + aq->sq.smq_rr_quantum = DFLT_RR_QTM; + aq->sq.default_chan = pfvf->hw.tx_chan_base; + aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ + aq->sq.sqb_aura = sqb_aura; + aq->sq.sq_int_ena = NIX_SQINT_BITS; + aq->sq.qint_idx = 0; + /* Due pipelining impact minimum 2000 unused SQ CQE's + * need to maintain to avoid CQ overflow. 
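+ * cq_limit below is expressed in 256ths of the CQ size: for a 4K CQ, + * (SEND_CQ_SKID * 256) / 4096 = 125, i.e. 125/256 of 4096 = 2000 CQEs + * kept free.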
+ */ + aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt)); + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) +{ + struct otx2_qset *qset = &pfvf->qset; + struct nix_aq_enq_req *aq; + struct otx2_cq_queue *cq; + int err, pool_id; + + cq = &qset->cq[qidx]; + cq->cq_idx = qidx; + if (qidx < pfvf->hw.rx_queues) { + cq->cq_type = CQ_RX; + cq->cint_idx = qidx; + cq->cqe_cnt = qset->rqe_cnt; + } else { + cq->cq_type = CQ_TX; + cq->cint_idx = qidx - pfvf->hw.rx_queues; + cq->cqe_cnt = qset->sqe_cnt; + } + cq->cqe_size = pfvf->qset.xqe_size; + + /* Allocate memory for CQEs */ + err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); + if (err) + return err; + + /* Save CQE CPU base for faster reference */ + cq->cqe_base = cq->cqe->base; + /* In case where all RQs auras point to single pool, + * all CQs receive buffer pool also point to same pool. + */ + pool_id = ((cq->cq_type == CQ_RX) && + (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx; + cq->rbpool = &qset->pool[pool_id]; + cq->refill_task_sched = false; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->cq.ena = 1; + aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4); + aq->cq.caching = 1; + aq->cq.base = cq->cqe->iova; + aq->cq.cint_idx = cq->cint_idx; + aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS; + aq->cq.qint_idx = 0; + aq->cq.avg_level = 255; + + if (qidx < pfvf->hw.rx_queues) { + aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); + aq->cq.drop_ena = 1; + + /* Enable receive CQ backpressure */ + aq->cq.bp_ena = 1; + aq->cq.bpid = pfvf->bpid[0]; + + /* Set backpressure level is same as cq pass level */ + aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); + } + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static void otx2_pool_refill_task(struct work_struct *work) +{ + struct otx2_cq_queue *cq; + struct otx2_pool *rbpool; + struct refill_work *wrk; + int qidx, free_ptrs = 0; + struct otx2_nic *pfvf; + s64 bufptr; + + wrk = container_of(work, struct refill_work, pool_refill_work.work); + pfvf = wrk->pf; + qidx = wrk - pfvf->refill_wrk; + cq = &pfvf->qset.cq[qidx]; + rbpool = cq->rbpool; + free_ptrs = cq->pool_ptrs; + + while (cq->pool_ptrs) { + bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL); + if (bufptr <= 0) { + /* Schedule a WQ if we fails to free atleast half of the + * pointers else enable napi for this RQ. 
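+ * On repeated allocation failures the work reschedules itself after + * 100 msecs and retries the remaining pointers.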
+ */ + if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) { + struct delayed_work *dwork; + + dwork = &wrk->pool_refill_work; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } else { + cq->refill_task_sched = false; + } + return; + } + otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); + cq->pool_ptrs--; + } + cq->refill_task_sched = false; +} + +int otx2_config_nix_queues(struct otx2_nic *pfvf) +{ + int qidx, err; + + /* Initialize RX queues */ + for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { + u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); + + err = otx2_rq_init(pfvf, qidx, lpb_aura); + if (err) + return err; + } + + /* Initialize TX queues */ + for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); + + err = otx2_sq_init(pfvf, qidx, sqb_aura); + if (err) + return err; + } + + /* Initialize completion queues */ + for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { + err = otx2_cq_init(pfvf, qidx); + if (err) + return err; + } + + /* Initialize work queue for receive buffer refill */ + + pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, + sizeof(struct refill_work), GFP_KERNEL); + if (!pfvf->refill_wrk) + return -ENOMEM; + + for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { + pfvf->refill_wrk[qidx].pf = pfvf; + INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work, + otx2_pool_refill_task); + } + return 0; +} + +int otx2_config_nix(struct otx2_nic *pfvf) +{ + struct nix_lf_alloc_req *nixlf; + struct nix_lf_alloc_rsp *rsp; + int err; + + pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512; + + /* Get memory to put this msg */ + nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); + if (!nixlf) + return -ENOMEM; + + /* Set RQ/SQ/CQ counts */ + nixlf->rq_cnt = pfvf->hw.rx_queues; + nixlf->sq_cnt = pfvf->hw.tx_queues; + nixlf->cq_cnt = pfvf->qset.cq_cnt; + nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; + nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */ + nixlf->xqe_sz = NIX_XQESZ_W16; + /* We don't know absolute NPA LF idx attached. + * AF will replace 'RVU_DEFAULT_PF_FUNC' with + * NPA LF attached to this RVU PF/VF. + */ + nixlf->npa_func = RVU_DEFAULT_PF_FUNC; + /* Disable alignment pad, enable L2 length check, + * enable L4 TCP/UDP checksum verification. 
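+ * These three options correspond to the BIT_ULL() flags set in rx_cfg + * below; the AF programs them into the NIX LF's RX configuration.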
+ */ + nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37); + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + + rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, + &nixlf->hdr); + if (IS_ERR(rsp)) + return PTR_ERR(rsp); + + if (rsp->qints < 1) + return -ENXIO; + + return rsp->hdr.rc; +} + +void otx2_sq_free_sqbs(struct otx2_nic *pfvf) +{ + struct otx2_qset *qset = &pfvf->qset; + struct otx2_hw *hw = &pfvf->hw; + struct otx2_snd_queue *sq; + int sqb, qidx; + u64 iova, pa; + + for (qidx = 0; qidx < hw->tx_queues; qidx++) { + sq = &qset->sq[qidx]; + if (!sq->sqb_ptrs) + continue; + for (sqb = 0; sqb < sq->sqb_count; sqb++) { + if (!sq->sqb_ptrs[sqb]) + continue; + iova = sq->sqb_ptrs[sqb]; + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + put_page(virt_to_page(phys_to_virt(pa))); + } + sq->sqb_count = 0; + } +} + +void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) +{ + int pool_id, pool_start = 0, pool_end = 0, size = 0; + u64 iova, pa; + + if (type == AURA_NIX_SQ) { + pool_start = otx2_get_pool_idx(pfvf, type, 0); + pool_end = pool_start + pfvf->hw.sqpool_cnt; + size = pfvf->hw.sqb_size; + } + if (type == AURA_NIX_RQ) { + pool_start = otx2_get_pool_idx(pfvf, type, 0); + pool_end = pfvf->hw.rqpool_cnt; + size = RCV_FRAG_LEN; + } + + /* Free SQB and RQB pointers from the aura pool */ + for (pool_id = pool_start; pool_id < pool_end; pool_id++) { + iova = otx2_aura_allocptr(pfvf, pool_id); + while (iova) { + if (type == AURA_NIX_RQ) + iova -= OTX2_HEAD_ROOM; + + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + dma_unmap_page_attrs(pfvf->dev, iova, size, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + put_page(virt_to_page(phys_to_virt(pa))); + iova = otx2_aura_allocptr(pfvf, pool_id); + } + } +} + +void otx2_aura_pool_free(struct otx2_nic *pfvf) +{ + struct otx2_pool *pool; + int pool_id; + + if (!pfvf->qset.pool) + return; + + for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) { + pool = &pfvf->qset.pool[pool_id]; + qmem_free(pfvf->dev, pool->stack); + qmem_free(pfvf->dev, pool->fc_addr); + } + devm_kfree(pfvf->dev, pfvf->qset.pool); +} + +static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs) +{ + struct npa_aq_enq_req *aq; + struct otx2_pool *pool; + int err; + + pool = &pfvf->qset.pool[pool_id]; + + /* Allocate memory for HW to update Aura count. + * Alloc one cache line, so that it fits all FC_STYPE modes. 
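+ * HW writes the aura's free-pointer count back to this cache line; + * see fc_ena/fc_addr in the aura context below.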
+ */ + if (!pool->fc_addr) { + err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); + if (err) + return err; + } + + /* Initialize this aura's context via AF */ + aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + } + + aq->aura_id = aura_id; + /* Will be filled by AF with correct pool context address */ + aq->aura.pool_addr = pool_id; + aq->aura.pool_caching = 1; + aq->aura.shift = ilog2(numptrs) - 8; + aq->aura.count = numptrs; + aq->aura.limit = numptrs; + aq->aura.avg_level = 255; + aq->aura.ena = 1; + aq->aura.fc_ena = 1; + aq->aura.fc_addr = pool->fc_addr->iova; + aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ + + /* Enable backpressure for RQ aura */ + if (aura_id < pfvf->hw.rqpool_cnt) { + aq->aura.bp_ena = 0; + aq->aura.nix0_bpid = pfvf->bpid[0]; + /* Set backpressure level is same as RQ aura pass level */ + aq->aura.bp = RQ_PASS_LVL_AURA; + } + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_AURA; + aq->op = NPA_AQ_INSTOP_INIT; + + return 0; +} + +static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size) +{ + struct npa_aq_enq_req *aq; + struct otx2_pool *pool; + int err; + + pool = &pfvf->qset.pool[pool_id]; + /* Alloc memory for stack which is used to store buffer pointers */ + err = qmem_alloc(pfvf->dev, &pool->stack, + stack_pages, pfvf->hw.stack_pg_bytes); + if (err) + return err; + + pool->rbsize = buf_size; + + /* Initialize this pool's context via AF */ + aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + qmem_free(pfvf->dev, pool->stack); + return err; + } + aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aq) { + qmem_free(pfvf->dev, pool->stack); + return -ENOMEM; + } + } + + aq->aura_id = pool_id; + aq->pool.stack_base = pool->stack->iova; + aq->pool.stack_caching = 1; + aq->pool.ena = 1; + aq->pool.buf_size = buf_size / 128; + aq->pool.stack_max_pages = stack_pages; + aq->pool.shift = ilog2(numptrs) - 8; + aq->pool.ptr_start = 0; + aq->pool.ptr_end = ~0ULL; + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_POOL; + aq->op = NPA_AQ_INSTOP_INIT; + + return 0; +} + +int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) +{ + int qidx, pool_id, stack_pages, num_sqbs; + struct otx2_qset *qset = &pfvf->qset; + struct otx2_hw *hw = &pfvf->hw; + struct otx2_snd_queue *sq; + struct otx2_pool *pool; + int err, ptr; + s64 bufptr; + + /* Calculate number of SQBs needed. + * + * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB. + * Last SQE is used for pointing to next SQB. 
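+ * E.g. with sqe_cnt = 1024: (4096 / 128) - 1 = 31 usable SQEs per SQB, + * so num_sqbs = (1024 + 31) / 31 = 34 (rounded up).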
+ */ + num_sqbs = (hw->sqb_size / 128) - 1; + num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs; + + /* Get no of stack pages needed */ + stack_pages = + (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; + + for (qidx = 0; qidx < hw->tx_queues; qidx++) { + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); + /* Initialize aura context */ + err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); + if (err) + goto fail; + + /* Initialize pool context */ + err = otx2_pool_init(pfvf, pool_id, stack_pages, + num_sqbs, hw->sqb_size); + if (err) + goto fail; + } + + /* Flush accumulated messages */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto fail; + + /* Allocate pointers and free them to aura/pool */ + for (qidx = 0; qidx < hw->tx_queues; qidx++) { + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); + pool = &pfvf->qset.pool[pool_id]; + + sq = &qset->sq[qidx]; + sq->sqb_count = 0; + sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL); + if (!sq->sqb_ptrs) + return -ENOMEM; + + for (ptr = 0; ptr < num_sqbs; ptr++) { + bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL); + if (bufptr <= 0) + return bufptr; + otx2_aura_freeptr(pfvf, pool_id, bufptr); + sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; + } + otx2_get_page(pool); + } + + return 0; +fail: + otx2_mbox_reset(&pfvf->mbox.mbox, 0); + otx2_aura_pool_free(pfvf); + return err; +} + +int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) +{ + struct otx2_hw *hw = &pfvf->hw; + int stack_pages, pool_id, rq; + struct otx2_pool *pool; + int err, ptr, num_ptrs; + s64 bufptr; + + num_ptrs = pfvf->qset.rqe_cnt; + + stack_pages = + (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; + + for (rq = 0; rq < hw->rx_queues; rq++) { + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq); + /* Initialize aura context */ + err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs); + if (err) + goto fail; + } + for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { + err = otx2_pool_init(pfvf, pool_id, stack_pages, + num_ptrs, RCV_FRAG_LEN); + if (err) + goto fail; + } + + /* Flush accumulated messages */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto fail; + + /* Allocate pointers and free them to aura/pool */ + for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { + pool = &pfvf->qset.pool[pool_id]; + for (ptr = 0; ptr < num_ptrs; ptr++) { + bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL); + if (bufptr <= 0) + return bufptr; + otx2_aura_freeptr(pfvf, pool_id, + bufptr + OTX2_HEAD_ROOM); + } + otx2_get_page(pool); + } + + return 0; +fail: + otx2_mbox_reset(&pfvf->mbox.mbox, 0); + otx2_aura_pool_free(pfvf); + return err; +} + +int otx2_config_npa(struct otx2_nic *pfvf) +{ + struct otx2_qset *qset = &pfvf->qset; + struct npa_lf_alloc_req *npalf; + struct otx2_hw *hw = &pfvf->hw; + int aura_cnt, err; + + /* Pool - Stack of free buffer pointers + * Aura - Alloc/frees pointers from/to pool for NIX DMA. + */ + + if (!hw->pool_cnt) + return -EINVAL; + + qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) * + hw->pool_cnt, GFP_KERNEL); + if (!qset->pool) + return -ENOMEM; + + /* Get memory to put this msg */ + npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox); + if (!npalf) + return -ENOMEM; + + /* Set aura and pool counts */ + npalf->nr_pools = hw->pool_cnt; + aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt)); + npalf->aura_sz = (aura_cnt >= ilog2(128)) ? 
(aura_cnt - 6) : 1; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + return 0; +} + +int otx2_detach_resources(struct mbox *mbox) +{ + struct rsrc_detach *detach; + + otx2_mbox_lock(mbox); + detach = otx2_mbox_alloc_msg_detach_resources(mbox); + if (!detach) { + otx2_mbox_unlock(mbox); + return -ENOMEM; + } + + /* detach all */ + detach->partial = false; + + /* Send detach request to AF */ + otx2_mbox_msg_send(&mbox->mbox, 0); + otx2_mbox_unlock(mbox); + return 0; +} +EXPORT_SYMBOL(otx2_detach_resources); + +int otx2_attach_npa_nix(struct otx2_nic *pfvf) +{ + struct rsrc_attach *attach; + struct msg_req *msix; + int err; + + otx2_mbox_lock(&pfvf->mbox); + /* Get memory to put this msg */ + attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); + if (!attach) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + attach->npalf = true; + attach->nixlf = true; + + /* Send attach request to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + otx2_mbox_unlock(&pfvf->mbox); + return err; + } + + pfvf->nix_blkaddr = BLKADDR_NIX0; + + /* If the platform has two NIX blocks then LF may be + * allocated from NIX1. + */ + if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL) + pfvf->nix_blkaddr = BLKADDR_NIX1; + + /* Get NPA and NIX MSIX vector offsets */ + msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); + if (!msix) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + otx2_mbox_unlock(&pfvf->mbox); + return err; + } + otx2_mbox_unlock(&pfvf->mbox); + + if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || + pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { + dev_err(pfvf->dev, + "RVUPF: Invalid MSIX vector offset for NPA/NIX\n"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(otx2_attach_npa_nix); + +void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) +{ + struct hwctx_disable_req *req; + + otx2_mbox_lock(mbox); + /* Request AQ to disable this context */ + if (npa) + req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox); + else + req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox); + + if (!req) { + otx2_mbox_unlock(mbox); + return; + } + + req->ctype = type; + + if (otx2_sync_mbox_msg(mbox)) + dev_err(mbox->pfvf->dev, "%s failed to disable context\n", + __func__); + + otx2_mbox_unlock(mbox); +} + +int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) +{ + struct nix_bp_cfg_req *req; + + if (enable) + req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox); + + if (!req) + return -ENOMEM; + + req->chan_base = 0; + req->chan_cnt = 1; + req->bpid_per_chan = 0; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static inline void otx2_nix_rq_op_stats(struct queue_stats *stats, + struct otx2_nic *pfvf, int qidx) +{ + u64 incr = (u64)qidx << 32; + u64 *ptr; + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); + stats->bytes = otx2_atomic64_add(incr, ptr); + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); + stats->pkts = otx2_atomic64_add(incr, ptr); +} + +static inline void otx2_nix_sq_op_stats(struct queue_stats *stats, + struct otx2_nic *pfvf, int qidx) +{ + u64 incr = (u64)qidx << 32; + u64 *ptr; + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); + stats->bytes = otx2_atomic64_add(incr, ptr); + + ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); + stats->pkts = otx2_atomic64_add(incr, ptr); +} + +/* Mbox message handlers */ +void mbox_handler_cgx_stats(struct otx2_nic *pfvf, + 
struct cgx_stats_rsp *rsp) +{ + int id; + + for (id = 0; id < CGX_RX_STATS_COUNT; id++) + pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id]; + for (id = 0; id < CGX_TX_STATS_COUNT; id++) + pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id]; +} + +void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf, + struct cgx_fec_stats_rsp *rsp) +{ + pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks; + pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks; +} + +void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf, + struct nix_txsch_alloc_rsp *rsp) +{ + int lvl, schq; + + /* Setup transmit scheduler list */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) + for (schq = 0; schq < rsp->schq[lvl]; schq++) + pf->hw.txschq_list[lvl][schq] = + rsp->schq_list[lvl][schq]; +} +EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc); + +void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, + struct npa_lf_alloc_rsp *rsp) +{ + pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; + pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; +} +EXPORT_SYMBOL(mbox_handler_npa_lf_alloc); + +void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, + struct nix_lf_alloc_rsp *rsp) +{ + pfvf->hw.sqb_size = rsp->sqb_size; + pfvf->hw.rx_chan_base = rsp->rx_chan_base; + pfvf->hw.tx_chan_base = rsp->tx_chan_base; + pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; + pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; +} +EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); + +void mbox_handler_msix_offset(struct otx2_nic *pfvf, + struct msix_offset_rsp *rsp) +{ + pfvf->hw.npa_msixoff = rsp->npa_msixoff; + pfvf->hw.nix_msixoff = rsp->nix_msixoff; +} +EXPORT_SYMBOL(mbox_handler_msix_offset); + +void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, + struct nix_bp_cfg_rsp *rsp) +{ + int chan, chan_id; + + for (chan = 0; chan < rsp->chan_cnt; chan++) { + chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F); + pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; + } +} +EXPORT_SYMBOL(mbox_handler_nix_bp_enable); + +void otx2_free_cints(struct otx2_nic *pfvf, int n) +{ + struct otx2_qset *qset = &pfvf->qset; + struct otx2_hw *hw = &pfvf->hw; + int irq, qidx; + + for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START; + qidx < n; + qidx++, irq++) { + int vector = pci_irq_vector(pfvf->pdev, irq); + + irq_set_affinity_hint(vector, NULL); + free_cpumask_var(hw->affinity_mask[irq]); + free_irq(vector, &qset->napi[qidx]); + } +} + +void otx2_set_cints_affinity(struct otx2_nic *pfvf) +{ + struct otx2_hw *hw = &pfvf->hw; + int vec, cpu, irq, cint; + + vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; + cpu = cpumask_first(cpu_online_mask); + + /* CQ interrupts */ + for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) { + if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL)) + return; + + cpumask_set_cpu(cpu, hw->affinity_mask[vec]); + + irq = pci_irq_vector(pfvf->pdev, vec); + irq_set_affinity_hint(irq, hw->affinity_mask[vec]); + + cpu = cpumask_next(cpu, cpu_online_mask); + if (unlikely(cpu >= nr_cpu_ids)) + cpu = 0; + } +} + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +int __weak \ +otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ + struct _req_type *req, \ + struct _rsp_type *rsp) \ +{ \ + /* Nothing to do here */ \ + return 0; \ +} \ +EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); +MBOX_UP_CGX_MESSAGES +#undef M diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h new file mode 100644 index 000000000000..b88763010b85 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h 
@@ -0,0 +1,761 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef OTX2_COMMON_H +#define OTX2_COMMON_H + +#include <linux/pci.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> +#include <linux/iommu.h> + +#include <mbox.h> +#include "otx2_reg.h" +#include "otx2_txrx.h" + +/* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063 +#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064 +#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8 + +#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 +#define PCI_SUBSYS_DEVID_95XX_RVU_PFVF 0xB200 + +/* PCI BAR nos */ +#define PCI_CFG_REG_BAR_NUM 2 +#define PCI_MBOX_BAR_NUM 4 + +#define NAME_SIZE 32 + +enum arua_mapped_qtypes { + AURA_NIX_RQ, + AURA_NIX_SQ, +}; + +/* NIX LF interrupts range*/ +#define NIX_LF_QINT_VEC_START 0x00 +#define NIX_LF_CINT_VEC_START 0x40 +#define NIX_LF_GINT_VEC 0x80 +#define NIX_LF_ERR_VEC 0x81 +#define NIX_LF_POISON_VEC 0x82 + +/* RSS configuration */ +struct otx2_rss_info { + u8 enable; + u32 flowkey_cfg; + u16 rss_size; + u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE]; +#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */ + u8 key[RSS_HASH_KEY_SIZE]; +}; + +/* NIX (or NPC) RX errors */ +enum otx2_errlvl { + NPC_ERRLVL_RE, + NPC_ERRLVL_LID_LA, + NPC_ERRLVL_LID_LB, + NPC_ERRLVL_LID_LC, + NPC_ERRLVL_LID_LD, + NPC_ERRLVL_LID_LE, + NPC_ERRLVL_LID_LF, + NPC_ERRLVL_LID_LG, + NPC_ERRLVL_LID_LH, + NPC_ERRLVL_NIX = 0x0F, +}; + +enum otx2_errcodes_re { + /* NPC_ERRLVL_RE errcodes */ + ERRCODE_FCS = 0x7, + ERRCODE_FCS_RCV = 0x8, + ERRCODE_UNDERSIZE = 0x10, + ERRCODE_OVERSIZE = 0x11, + ERRCODE_OL2_LEN_MISMATCH = 0x12, + /* NPC_ERRLVL_NIX errcodes */ + ERRCODE_OL3_LEN = 0x10, + ERRCODE_OL4_LEN = 0x11, + ERRCODE_OL4_CSUM = 0x12, + ERRCODE_IL3_LEN = 0x20, + ERRCODE_IL4_LEN = 0x21, + ERRCODE_IL4_CSUM = 0x22, +}; + +/* NIX TX stats */ +enum nix_stat_lf_tx { + TX_UCAST = 0x0, + TX_BCAST = 0x1, + TX_MCAST = 0x2, + TX_DROP = 0x3, + TX_OCTS = 0x4, + TX_STATS_ENUM_LAST, +}; + +/* NIX RX stats */ +enum nix_stat_lf_rx { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_DROP = 0x4, + RX_DROP_OCTS = 0x5, + RX_FCS = 0x6, + RX_ERR = 0x7, + RX_DRP_BCAST = 0x8, + RX_DRP_MCAST = 0x9, + RX_DRP_L3BCAST = 0xa, + RX_DRP_L3MCAST = 0xb, + RX_STATS_ENUM_LAST, +}; + +struct otx2_dev_stats { + u64 rx_bytes; + u64 rx_frames; + u64 rx_ucast_frames; + u64 rx_bcast_frames; + u64 rx_mcast_frames; + u64 rx_drops; + + u64 tx_bytes; + u64 tx_frames; + u64 tx_ucast_frames; + u64 tx_bcast_frames; + u64 tx_mcast_frames; + u64 tx_drops; +}; + +/* Driver counted stats */ +struct otx2_drv_stats { + atomic_t rx_fcs_errs; + atomic_t rx_oversize_errs; + atomic_t rx_undersize_errs; + atomic_t rx_csum_errs; + atomic_t rx_len_errs; + atomic_t rx_other_errs; +}; + +struct mbox { + struct otx2_mbox mbox; + struct work_struct mbox_wrk; + struct otx2_mbox mbox_up; + struct work_struct mbox_up_wrk; + struct otx2_nic *pfvf; + void *bbuf_base; /* Bounce buffer for mbox memory */ + struct mutex lock; /* serialize mailbox access */ + int num_msgs; /*mbox number of messages*/ + int up_num_msgs;/* mbox_up number of messages*/ +}; + +struct otx2_hw { + struct pci_dev *pdev; + struct otx2_rss_info rss_info; + u16 rx_queues; + u16 tx_queues; + u16 max_queues; + u16 pool_cnt; + u16 rqpool_cnt; + 
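/* SQ pool ids follow the RQ pool ids in qset->pool[], see otx2_get_pool_idx() */ +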
u16 sqpool_cnt; + + /* NPA */ + u32 stack_pg_ptrs; /* No of ptrs per stack page */ + u32 stack_pg_bytes; /* Size of stack page */ + u16 sqb_size; + + /* NIX */ + u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + + /* HW settings, coalescing etc */ + u16 rx_chan_base; + u16 tx_chan_base; + u16 cq_qcount_wait; + u16 cq_ecount_wait; + u16 rq_skid; + u8 cq_time_wait; + + /* For TSO segmentation */ + u8 lso_tsov4_idx; + u8 lso_tsov6_idx; + u8 hw_tso; + + /* MSI-X*/ + u8 cint_cnt; /* CQ interrupt count */ + u16 npa_msixoff; /* Offset of NPA vectors */ + u16 nix_msixoff; /* Offset of NIX vectors */ + char *irq_name; + cpumask_var_t *affinity_mask; + + /* Stats */ + struct otx2_dev_stats dev_stats; + struct otx2_drv_stats drv_stats; + u64 cgx_rx_stats[CGX_RX_STATS_COUNT]; + u64 cgx_tx_stats[CGX_TX_STATS_COUNT]; + u64 cgx_fec_corr_blks; + u64 cgx_fec_uncorr_blks; +}; + +struct otx2_ptp { + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; + struct otx2_nic *nic; + + struct cyclecounter cycle_counter; + struct timecounter time_counter; + bool ptp_en; +}; + +struct otx2_vf_config { + struct otx2_nic *pf; + struct delayed_work link_event_work; + struct delayed_work ptp_info_work; + bool intf_down; /* interface was either configured or not */ + u8 mac[ETH_ALEN]; + u16 vlan; +}; + +struct flr_work { + struct work_struct work; + struct otx2_nic *pf; +}; + +struct refill_work { + struct delayed_work pool_refill_work; + struct otx2_nic *pf; +}; + +struct otx2_mac_table { + u8 addr[ETH_ALEN]; + u16 mcam_entry; + bool inuse; +}; + +struct otx2_nic { + void __iomem *reg_base; + struct net_device *netdev; + void *iommu_domain; + u16 iommu_domain_type; + u16 xtra_hdr; + u16 max_frs; + +#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0) +#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1) +#define OTX2_FLAG_INTF_DOWN BIT_ULL(2) +#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3) +#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4) +#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5) +#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6) + u64 flags; + + struct otx2_qset qset; + struct otx2_hw hw; + struct pci_dev *pdev; + struct device *dev; + + /* Mbox */ + struct mbox mbox; + struct mbox *mbox_pfvf; + struct workqueue_struct *mbox_wq; + struct workqueue_struct *mbox_pfvf_wq; + + u8 total_vfs; + u16 pcifunc; /* RVU PF_FUNC */ + u16 bpid[NIX_MAX_BPID_CHAN]; + struct otx2_ptp *ptp; + struct otx2_vf_config *vf_configs; + struct cgx_link_user_info linfo; + + /* NPC MCAM */ + u32 nr_flows; + u32 ntuple_max_flows; + u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES]; + struct list_head flows; + struct otx2_mac_table *mac_table; + + u64 reset_count; + struct work_struct reset_task; + struct workqueue_struct *flr_wq; + struct flr_work *flr_wrk; + struct refill_work *refill_wrk; + struct work_struct otx2_rx_mode_work; + struct workqueue_struct *otx2_ndo_wq; + + /* Ethtool stuff */ + u32 msg_enable; + +#define OTX2_PRIV_FLAG_PAM4 BIT(0) +#define OTX2_PRIV_FLAG_EDSA_HDR BIT(1) +#define OTX2_PRIV_FLAG_HIGIG2_HDR BIT(2) +#define OTX2_IS_EDSA_ENABLED(flags) ((flags) & \ + OTX2_PRIV_FLAG_EDSA_HDR) +#define OTX2_IS_HIGIG2_ENABLED(flags) ((flags) & \ + OTX2_PRIV_FLAG_HIGIG2_HDR) + u32 ethtool_flags; + + /* extended DSA and EDSA header lengths are 8/16 bytes + * so take max length 16 bytes here + */ +#define OTX2_EDSA_HDR_LEN 16 +#define OTX2_HIGIG2_HDR_LEN 16 + u32 addl_mtu; + /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */ + int nix_blkaddr; +}; + +static inline bool is_otx2_lbkvf(struct pci_dev *pdev) +{ + return pdev->device == 
PCI_DEVID_OCTEONTX2_RVU_AFVF; +} + +static inline bool is_96xx_A0(struct pci_dev *pdev) +{ + return (pdev->revision == 0x00) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF); +} + +static inline bool is_95xx_A0(struct pci_dev *pdev) +{ + return ((pdev->revision == 0x10) || (pdev->revision == 0x11)) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_95XX_RVU_PFVF); +} + +static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) +{ + struct otx2_hw *hw = &pfvf->hw; + + pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT; + pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT; + pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT; + + hw->hw_tso = true; + + if (is_96xx_A0(pfvf->pdev) || is_95xx_A0(pfvf->pdev)) { + hw->hw_tso = false; + /* Due to HW issue previous silicons required minimum 600 + * unused CQE to avoid CQ overflow. + */ + pfvf->hw.rq_skid = 600; + pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K); + } + if (is_96xx_A0(pfvf->pdev)) + pfvf->hw.cq_qcount_wait = 0x0; +} + +static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset) +{ + u64 blkaddr; + + switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) { + case BLKTYPE_NIX: + blkaddr = nic->nix_blkaddr; + break; + case BLKTYPE_NPA: + blkaddr = BLKADDR_NPA; + break; + default: + blkaddr = BLKADDR_RVUM; + break; + }; + + offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT); + offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT); + + return nic->reg_base + offset; +} + +/* Register read/write APIs */ +static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val) +{ + void __iomem *addr = otx2_get_regaddr(nic, offset); + + writeq(val, addr); +} + +static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset) +{ + void __iomem *addr = otx2_get_regaddr(nic, offset); + + return readq(addr); +} + +/* Mbox bounce buffer APIs */ +static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev) +{ + struct otx2_mbox_dev *mdev; + struct otx2_mbox *otx2_mbox; + + mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL); + if (!mbox->bbuf_base) + return -ENOMEM; + + /* Overwrite mbox mbase to point to bounce buffer, so that PF/VF + * prepare all mbox messages in bounce buffer instead of directly + * in hw mbox memory. + */ + otx2_mbox = &mbox->mbox; + mdev = &otx2_mbox->dev[0]; + mdev->mbase = mbox->bbuf_base; + + otx2_mbox = &mbox->mbox_up; + mdev = &otx2_mbox->dev[0]; + mdev->mbase = mbox->bbuf_base; + return 0; +} + +static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid) +{ + u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *hdr; + u64 msg_size; + + if (mdev->mbase == hw_mbase) + return; + + hdr = hw_mbase + mbox->rx_start; + msg_size = hdr->msg_size; + + if (msg_size > mbox->rx_size - msgs_offset) + msg_size = mbox->rx_size - msgs_offset; + + /* Copy mbox messages from mbox memory to bounce buffer */ + memcpy(mdev->mbase + mbox->rx_start, + hw_mbase + mbox->rx_start, msg_size + msgs_offset); +} + +static inline void otx2_mbox_lock_init(struct mbox *mbox) +{ + mutex_init(&mbox->lock); +} + +static inline void otx2_mbox_lock(struct mbox *mbox) +{ + mutex_lock(&mbox->lock); +} + +static inline void otx2_mbox_unlock(struct mbox *mbox) +{ + mutex_unlock(&mbox->lock); +} + +/* With the absence of API for 128-bit IO memory access for arm64, + * implement required operations at place. 
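+ * otx2_write128()/otx2_read128() below use STP/LDP pairs, while the LSE + * atomics LDADD/LDEOR back otx2_atomic64_add() and otx2_lmt_flush(); + * non-arm64 builds get stub definitions.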
+ */ +#ifdef __BIG_ENDIAN +#define otx2_high(high, low) (low) +#define otx2_low(high, low) (high) +#else +#define otx2_high(high, low) (high) +#define otx2_low(high, low) (low) +#endif + +#if defined(CONFIG_ARM64) +static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr) +{ + __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!" + ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr)); +} + +static inline __uint128_t otx2_read128(const void __iomem *addr) +{ + __uint128_t *__addr = (__force __uint128_t *)addr; + u64 h, l; + + __asm__ volatile("ldp %x[x0], %x[x1], %x[p1]" + : [x0]"=r"(l), [x1]"=r"(h) + : [p1]"Ump"(*__addr)); + + return (__uint128_t)le64_to_cpu(otx2_low(h, l)) | + (((__uint128_t)le64_to_cpu(otx2_high(h, l))) << 64); +} + +static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) +{ + u64 result; + + __asm__ volatile(".cpu generic+lse\n" + "ldadd %x[i], %x[r], [%[b]]" + : [r]"=r"(result), "+m"(*ptr) + : [i]"r"(incr), [b]"r"(ptr) + : "memory"); + return result; +} + +static inline u64 otx2_lmt_flush(uint64_t addr) +{ + u64 result = 0; + + __asm__ volatile(".cpu generic+lse\n" + "ldeor xzr,%x[rf],[%[rs]]" + : [rf]"=r"(result) + : [rs]"r"(addr)); + return result; +} +#else +#define otx2_write128(lo, hi, addr) +#define otx2_read128(addr) ({ 0; }) +#define otx2_atomic64_add(incr, ptr) ({ 0; }) +#define otx2_lmt_flush(addr) ({ 0; }) +#endif + +/* Alloc pointer from pool/aura */ +static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura) +{ + u64 *ptr = (u64 *)otx2_get_regaddr(pfvf, + NPA_LF_AURA_OP_ALLOCX(0)); + u64 incr = (u64)aura | BIT_ULL(63); + + return otx2_atomic64_add(incr, ptr); +} + +/* Free pointer to a pool/aura */ +static inline void otx2_aura_freeptr(struct otx2_nic *pfvf, + int aura, s64 buf) +{ + otx2_write128((u64)buf, (u64)aura | BIT_ULL(63), + otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0)); +} + +/* Update page ref count */ +static inline void otx2_get_page(struct otx2_pool *pool) +{ + if (!pool->page) + return; + + if (pool->pageref) + page_ref_add(pool->page, pool->pageref); + pool->pageref = 0; + pool->page = NULL; +} + +static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx) +{ + if (type == AURA_NIX_SQ) + return pfvf->hw.rqpool_cnt + idx; + + /* AURA_NIX_RQ */ + return idx; +} + +/* Mbox APIs */ +static inline int otx2_sync_mbox_msg(struct mbox *mbox) +{ + int err; + + if (!otx2_mbox_nonempty(&mbox->mbox, 0)) + return 0; + otx2_mbox_msg_send(&mbox->mbox, 0); + err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0); + if (err) + return err; + + return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0); +} + +static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid) +{ + int err; + + if (!otx2_mbox_nonempty(&mbox->mbox_up, devid)) + return 0; + otx2_mbox_msg_send(&mbox->mbox_up, devid); + err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid); + if (err) + return err; + + return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid); +} + +/* Use this API to send mbox msgs in atomic context + * where sleeping is not allowed + */ +static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox) +{ + int err; + + if (!otx2_mbox_nonempty(&mbox->mbox, 0)) + return 0; + otx2_mbox_msg_send(&mbox->mbox, 0); + err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0); + if (err) + return err; + + return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0); +} + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +static struct _req_type __maybe_unused \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \ +{ \ + struct _req_type *req; \ + \ + req = (struct _req_type 
*)otx2_mbox_alloc_msg_rsp( \ + &mbox->mbox, 0, sizeof(struct _req_type), \ + sizeof(struct _rsp_type)); \ + if (!req) \ + return NULL; \ + req->hdr.sig = OTX2_MBOX_REQ_SIG; \ + req->hdr.id = _id; \ + return req; \ +} + +MBOX_MESSAGES +#undef M + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +int \ +otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ + struct _req_type *req, \ + struct _rsp_type *rsp); \ + +MBOX_UP_CGX_MESSAGES +#undef M + +/* Time to wait before watchdog kicks off. + * Due to PSE deadlock errata, XOFF on TL2 transmission + * queues takes more time than default watchdog timeout. + * Hence setting this value higher. + */ +#define OTX2_TX_TIMEOUT (100000 * HZ) + +#define RVU_PFVF_PF_SHIFT 10 +#define RVU_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF + +static inline int rvu_get_pf(u16 pcifunc) +{ + return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; +} + +static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + dma_addr_t iova; + + if (pfvf->iommu_domain_type == IOMMU_DOMAIN_IDENTITY) + return page_to_phys(page) + offset; + + iova = dma_map_page_attrs(pfvf->dev, page, + offset, size, dir, attrs); + if (unlikely(dma_mapping_error(pfvf->dev, iova))) + return (dma_addr_t)NULL; + return iova; +} + +static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + if (pfvf->iommu_domain_type == IOMMU_DOMAIN_IDENTITY) + return; + + dma_unmap_page_attrs(pfvf->dev, addr, size, dir, attrs); +} + +/* MSI-X APIs */ +void otx2_free_cints(struct otx2_nic *pfvf, int n); +void otx2_set_cints_affinity(struct otx2_nic *pfvf); + +int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac); +int otx2_set_mac_address(struct net_device *netdev, void *p); +int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu); +void otx2_tx_timeout(struct net_device *netdev); +void otx2_get_mac_from_af(struct net_device *netdev); +void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx); + +/* RVU block related APIs */ +int otx2_attach_npa_nix(struct otx2_nic *pfvf); +int otx2_detach_resources(struct mbox *mbox); +int otx2_config_npa(struct otx2_nic *pfvf); +int otx2_sq_aura_pool_init(struct otx2_nic *pfvf); +int otx2_rq_aura_pool_init(struct otx2_nic *pfvf); +void otx2_aura_pool_free(struct otx2_nic *pfvf); +void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type); +void otx2_sq_free_sqbs(struct otx2_nic *pfvf); +int otx2_config_nix(struct otx2_nic *pfvf); +int otx2_config_nix_queues(struct otx2_nic *pfvf); +int otx2_txschq_config(struct otx2_nic *pfvf, int lvl); +int otx2_txsch_alloc(struct otx2_nic *pfvf); +int otx2_txschq_stop(struct otx2_nic *pfvf); +dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, + gfp_t gfp); +int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); +void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); +int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); +void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); + +/* RSS configuration APIs*/ +int otx2_rss_init(struct otx2_nic *pfvf); +int otx2_set_flowkey_cfg(struct otx2_nic *pfvf); +void otx2_set_rss_key(struct otx2_nic *pfvf); +int otx2_set_rss_table(struct otx2_nic *pfvf); + +/* Mbox handlers */ +void mbox_handler_msix_offset(struct 
otx2_nic *pfvf, + struct msix_offset_rsp *rsp); +void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, + struct npa_lf_alloc_rsp *rsp); +void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, + struct nix_lf_alloc_rsp *rsp); +void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf, + struct nix_txsch_alloc_rsp *rsp); +void mbox_handler_cgx_stats(struct otx2_nic *pfvf, + struct cgx_stats_rsp *rsp); +void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf, + struct cgx_fec_stats_rsp *rsp); +void otx2_set_fec_stats_count(struct otx2_nic *pfvf); +void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, + struct nix_bp_cfg_rsp *rsp); + +/* Device stats APIs */ +void otx2_get_dev_stats(struct otx2_nic *pfvf); +void otx2_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); +void otx2_update_lmac_stats(struct otx2_nic *pfvf); +void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf); +int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx); +int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx); +void otx2_set_ethtool_ops(struct net_device *netdev); +void otx2vf_set_ethtool_ops(struct net_device *netdev); + +int otx2_open(struct net_device *netdev); +int otx2_stop(struct net_device *netdev); +int otx2vf_open(struct net_device *netdev); +int otx2vf_stop(struct net_device *netdev); +int otx2_set_real_num_queues(struct net_device *netdev, + int tx_queues, int rx_queues); +int otx2_set_npc_parse_mode(struct otx2_nic *pfvf); + +/* MCAM filter related APIs */ +void otx2_do_set_rx_mode(struct work_struct *work); +int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); +int otx2_mcam_flow_init(struct otx2_nic *pf); +int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); +void otx2_mcam_flow_del(struct otx2_nic *pf); +int otx2_destroy_ntuple_flows(struct otx2_nic *pf); +int otx2_destroy_mcam_flows(struct otx2_nic *pfvf); +int otx2_get_flow(struct otx2_nic *pfvf, + struct ethtool_rxnfc *nfc, u32 location); +int otx2_get_all_flows(struct otx2_nic *pfvf, + struct ethtool_rxnfc *nfc, u32 *rule_locs); +int otx2_add_flow(struct otx2_nic *pfvf, + struct ethtool_rx_flow_spec *fsp); +int otx2_remove_flow(struct otx2_nic *pfvf, u32 location); +int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, + struct npc_install_flow_req *req); +int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); +int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); +int otx2smqvf_probe(struct otx2_nic *vf); +int otx2smqvf_remove(struct otx2_nic *vf); + +#endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c new file mode 100644 index 000000000000..e2cb70a79dbe --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -0,0 +1,1565 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/pci.h> +#include <linux/net_tstamp.h> +#include <linux/ethtool.h> +#include <linux/stddef.h> +#include <linux/etherdevice.h> +#include <linux/log2.h> + +#include "otx2_common.h" +#include "otx2_ptp.h" + +#define DRV_NAME "octeontx2-nicpf" +#define DRV_VERSION "1.0" +#define DRV_VF_NAME "octeontx2-nicvf" +#define DRV_VF_VERSION "1.0" + +#define OTX2_DEFAULT_ACTION 0x1 + +static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf); + +static const char otx2_priv_flags_strings[][ETH_GSTRING_LEN] = { + "pam4", + "edsa", + "higig2", +}; + +struct otx2_stat { + char name[ETH_GSTRING_LEN]; + unsigned int index; +}; + +/* HW device stats */ +#define OTX2_DEV_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \ +} + +#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CE23 //110001110001100111000100011 +#define OTX2_ETHTOOL_ALL_MODES (ULLONG_MAX) + +static const struct otx2_stat otx2_dev_stats[] = { + OTX2_DEV_STAT(rx_bytes), + OTX2_DEV_STAT(rx_frames), + OTX2_DEV_STAT(rx_ucast_frames), + OTX2_DEV_STAT(rx_bcast_frames), + OTX2_DEV_STAT(rx_mcast_frames), + OTX2_DEV_STAT(rx_drops), + + OTX2_DEV_STAT(tx_bytes), + OTX2_DEV_STAT(tx_frames), + OTX2_DEV_STAT(tx_ucast_frames), + OTX2_DEV_STAT(tx_bcast_frames), + OTX2_DEV_STAT(tx_mcast_frames), + OTX2_DEV_STAT(tx_drops), +}; + +/* Driver level stats */ +#define OTX2_DRV_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \ +} + +static const struct otx2_stat otx2_drv_stats[] = { + OTX2_DRV_STAT(rx_fcs_errs), + OTX2_DRV_STAT(rx_oversize_errs), + OTX2_DRV_STAT(rx_undersize_errs), + OTX2_DRV_STAT(rx_csum_errs), + OTX2_DRV_STAT(rx_len_errs), + OTX2_DRV_STAT(rx_other_errs), +}; + +static const struct otx2_stat otx2_queue_stats[] = { + { "bytes", 0 }, + { "frames", 1 }, +}; + +static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats); +static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats); +static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats); + +int __weak otx2vf_open(struct net_device *netdev) +{ + return 0; +} + +int __weak otx2vf_stop(struct net_device *netdev) +{ + return 0; +} + +static void otx2_dev_open(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + if (pfvf->pcifunc & RVU_PFVF_FUNC_MASK) + otx2vf_open(netdev); + else + otx2_open(netdev); +} + +static void otx2_dev_stop(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + if (pfvf->pcifunc & RVU_PFVF_FUNC_MASK) + otx2vf_stop(netdev); + else + otx2_stop(netdev); +} + +static void otx2_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info)); +} + +static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset) +{ + int start_qidx = qset * pfvf->hw.rx_queues; + int qidx, stats; + + for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { + for (stats = 0; stats < otx2_n_queue_stats; stats++) { + sprintf(*data, "rxq%d: %s", qidx + start_qidx, + otx2_queue_stats[stats].name); + *data += ETH_GSTRING_LEN; + } + } + for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + for (stats = 0; stats < otx2_n_queue_stats; stats++) { + sprintf(*data, "txq%d: %s", qidx + start_qidx, + otx2_queue_stats[stats].name); + *data += 
ETH_GSTRING_LEN; + } + } +} + +static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + int stats; + + if (sset == ETH_SS_PRIV_FLAGS) { + memcpy(data, otx2_priv_flags_strings, + ARRAY_SIZE(otx2_priv_flags_strings) * ETH_GSTRING_LEN); + return; + } + + if (sset != ETH_SS_STATS) + return; + + for (stats = 0; stats < otx2_n_dev_stats; stats++) { + memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + for (stats = 0; stats < otx2_n_drv_stats; stats++) { + memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + otx2_get_qset_strings(pfvf, &data, 0); + + for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { + sprintf(data, "cgx_rxstat%d: ", stats); + data += ETH_GSTRING_LEN; + } + + for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { + sprintf(data, "cgx_txstat%d: ", stats); + data += ETH_GSTRING_LEN; + } + + strcpy(data, "reset_count"); + data += ETH_GSTRING_LEN; + if (pfvf->linfo.fec) { + sprintf(data, "Fec Corrected Errors: "); + data += ETH_GSTRING_LEN; + sprintf(data, "Fec Uncorrected Errors: "); + data += ETH_GSTRING_LEN; + } +} + +static void otx2_get_qset_stats(struct otx2_nic *pfvf, + struct ethtool_stats *stats, u64 **data) +{ + int stat, qidx; + + if (!pfvf) + return; + for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { + if (!otx2_update_rq_stats(pfvf, qidx)) { + for (stat = 0; stat < otx2_n_queue_stats; stat++) + *((*data)++) = 0; + continue; + } + for (stat = 0; stat < otx2_n_queue_stats; stat++) + *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats) + [otx2_queue_stats[stat].index]; + } + + for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + if (!otx2_update_sq_stats(pfvf, qidx)) { + for (stat = 0; stat < otx2_n_queue_stats; stat++) + *((*data)++) = 0; + continue; + } + for (stat = 0; stat < otx2_n_queue_stats; stat++) + *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats) + [otx2_queue_stats[stat].index]; + } +} + +static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf) +{ + struct msg_req *req; + int rc = -EAGAIN; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox); + if (!req) + goto end; + + if (!otx2_sync_mbox_msg(&pfvf->mbox)) + rc = 0; +end: + otx2_mbox_unlock(&pfvf->mbox); + return rc; +} + +/* Get device and per queue statistics */ +static void otx2_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + u64 fec_corr_blks, fec_uncorr_blks; + struct cgx_fw_data *rsp; + int stat; + + otx2_get_dev_stats(pfvf); + for (stat = 0; stat < otx2_n_dev_stats; stat++) + *(data++) = ((u64 *)&pfvf->hw.dev_stats) + [otx2_dev_stats[stat].index]; + + for (stat = 0; stat < otx2_n_drv_stats; stat++) + *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats) + [otx2_drv_stats[stat].index]); + + otx2_get_qset_stats(pfvf, stats, &data); + otx2_update_lmac_stats(pfvf); + for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_rx_stats[stat]; + for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_tx_stats[stat]; + *(data++) = pfvf->reset_count; + + if (pfvf->linfo.fec == OTX2_FEC_NONE) + return; + + fec_corr_blks = pfvf->hw.cgx_fec_corr_blks; + fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks; + + rsp = otx2_get_fwdata(pfvf); + if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats && + !otx2_get_phy_fec_stats(pfvf)) { + /* Fetch fwdata again because it's been recently 
populated with + * latest PHY FEC stats. + */ + rsp = otx2_get_fwdata(pfvf); + if (!IS_ERR(rsp)) { + struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats; + + if (pfvf->linfo.fec == OTX2_FEC_BASER) { + fec_corr_blks = p->brfec_corr_blks; + fec_uncorr_blks = p->brfec_uncorr_blks; + } else { + fec_corr_blks = p->rsfec_corr_cws; + fec_uncorr_blks = p->rsfec_uncorr_cws; + } + } + } + + *(data++) = fec_corr_blks; + *(data++) = fec_uncorr_blks; +} + +static int otx2_get_sset_count(struct net_device *netdev, int sset) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + int qstats_count, fec_stats_count = 0; + bool if_up = netif_running(netdev); + + if (sset == ETH_SS_PRIV_FLAGS) + return ARRAY_SIZE(otx2_priv_flags_strings); + + if (sset != ETH_SS_STATS) + return -EINVAL; + + qstats_count = otx2_n_queue_stats * + (pfvf->hw.rx_queues + pfvf->hw.tx_queues); + + if (!if_up || !pfvf->linfo.fec) { + return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + + CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1; + } + fec_stats_count = 2; + otx2_update_lmac_fec_stats(pfvf); + return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + + CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1 + + fec_stats_count; +} + +/* Get no of queues device supports and current queue count */ +static void otx2_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + + memset(channel, 0, sizeof(*channel)); + channel->max_rx = pfvf->hw.max_queues; + channel->max_tx = pfvf->hw.max_queues; + + channel->rx_count = pfvf->hw.rx_queues; + channel->tx_count = pfvf->hw.tx_queues; +} + +/* Set no of Tx, Rx queues to be used */ +static int otx2_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + bool if_up = netif_running(dev); + int err = 0; + + if (!channel->rx_count || !channel->tx_count) + return -EINVAL; + if (channel->rx_count > pfvf->hw.max_queues) + return -EINVAL; + if (channel->tx_count > pfvf->hw.max_queues) + return -EINVAL; + + if (if_up) + otx2_dev_stop(dev); + + pfvf->hw.rx_queues = channel->rx_count; + pfvf->hw.tx_queues = channel->tx_count; + err = otx2_set_real_num_queues(dev, pfvf->hw.tx_queues, + pfvf->hw.rx_queues); + pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues; + if (err) + return err; + + if (if_up) + otx2_dev_open(dev); + + netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", + pfvf->hw.tx_queues, pfvf->hw.rx_queues); + + return err; +} + +static void otx2_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_pause_frm_cfg *req, *rsp; + + req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); + if (!req) + return; + + if (!otx2_sync_mbox_msg(&pfvf->mbox)) { + rsp = (struct cgx_pause_frm_cfg *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + pause->rx_pause = rsp->rx_pause; + pause->tx_pause = rsp->tx_pause; + } +} + +static int otx2_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_pause_frm_cfg *req; + + if (pause->autoneg) + return -EOPNOTSUPP; + + req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); + if (!req) + return -EAGAIN; + + req->set = 1; + req->rx_pause = pause->rx_pause; + req->tx_pause = pause->tx_pause; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static void otx2_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + 
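/* Supported ring sizes are 16, 64, 256, 1K, 4K, 16K, 64K, 256K and + * 1M entries; until a size has been set, the defaults of 256 RQEs + * and 4K SQEs are reported. + */ + 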
struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_qset *qs = &pfvf->qset; + + ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX); + ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256); + ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX); + ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K); +} + +static int otx2_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + bool if_up = netif_running(netdev); + struct otx2_qset *qs = &pfvf->qset; + u32 rx_count, tx_count; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */ + rx_count = clamp_t(u32, ring->rx_pending, + Q_COUNT(Q_SIZE_MIN), Q_COUNT(Q_SIZE_MAX)); + /* On some silicon variants skid or reserved CQEs are + * needed to avoid CQ overflow. + */ + if (rx_count < pfvf->hw.rq_skid) + rx_count = pfvf->hw.rq_skid; + rx_count = Q_COUNT(Q_SIZE(rx_count, 3)); + + /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs + * needs to be maintained to avoid CQ overflow, hence the + * minimum size of 4K. + */ + tx_count = clamp_t(u32, ring->tx_pending, + Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX)); + tx_count = Q_COUNT(Q_SIZE(tx_count, 3)); + + if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt) + return 0; + + if (if_up) + otx2_dev_stop(netdev); + + /* Assigned to the nearest possible exponent. */ + qs->sqe_cnt = tx_count; + qs->rqe_cnt = rx_count; + + if (if_up) + otx2_dev_open(netdev); + return 0; +} + +static int otx2_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_hw *hw = &pfvf->hw; + + cmd->rx_coalesce_usecs = hw->cq_time_wait; + cmd->rx_max_coalesced_frames = hw->cq_ecount_wait; + cmd->tx_coalesce_usecs = hw->cq_time_wait; + cmd->tx_max_coalesced_frames = hw->cq_ecount_wait; + + return 0; +} + +static int otx2_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_hw *hw = &pfvf->hw; + int qidx; + + if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce || + ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq || + ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq || + ec->stats_block_coalesce_usecs || ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || ec->rate_sample_interval) + return -EOPNOTSUPP; + + if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames) + return 0; + + /* 'cq_time_wait' is 8bit and is in multiples of 100ns, + * so clamp the user-given value to the range of 1 to 25usec. + */ + ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs, + 1, CQ_TIMER_THRESH_MAX); + ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs, + 1, CQ_TIMER_THRESH_MAX); + + /* Rx and Tx are mapped to the same CQ, check which one + * has changed; if both, choose the min. 
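+ * e.g. if cq_time_wait already equals the Rx value then only Tx + * changed, so the Tx value is taken; if neither matches, the + * smaller of the two is used.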
+ */ + if (hw->cq_time_wait == ec->rx_coalesce_usecs) + hw->cq_time_wait = ec->tx_coalesce_usecs; + else if (hw->cq_time_wait == ec->tx_coalesce_usecs) + hw->cq_time_wait = ec->rx_coalesce_usecs; + else + hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs, + ec->tx_coalesce_usecs); + + /* Max ecount_wait supported is 16bit, + * so clamp the user-given value to the range of 1 to 64k. + */ + ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames, + 1, U16_MAX); + ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames, + 1, U16_MAX); + + /* Rx and Tx are mapped to the same CQ, check which one + * has changed; if both, choose the min. + */ + if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames) + hw->cq_ecount_wait = ec->tx_max_coalesced_frames; + else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames) + hw->cq_ecount_wait = ec->rx_max_coalesced_frames; + else + hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames, + ec->tx_max_coalesced_frames); + + if (netif_running(netdev)) { + for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++) + otx2_config_irq_coalescing(pfvf, qidx); + } + + return 0; +} + +static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, + struct ethtool_rxnfc *nfc) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + + if (!(rss->flowkey_cfg & + (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))) + return 0; + + /* Minimum is IPv4 and IPv6, SIP/DIP */ + nfc->data = RXH_IP_SRC | RXH_IP_DST; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP) + nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP) + nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP) + nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + break; + default: + return -EINVAL; + } + return 0; +} + +static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, + struct ethtool_rxnfc *nfc) +{ + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + u32 rss_cfg = rss->flowkey_cfg; + u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3; + + if (!rss->enable) + netdev_err(pfvf->netdev, "RSS is disabled, cmd ignored\n"); + + /* Minimum is IPv4 and IPv6, SIP/DIP */ + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + /* Different config for v4 and v6 is not supported. + * Both of them have to be either 4-tuple or 2-tuple. 
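+ * e.g. 'ethtool -N <dev> rx-flow-hash tcp4 sdfn' also enables + * 4-tuple hashing for tcp6, since both share NIX_FLOW_KEY_TYPE_TCP.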
+ */ + if ((nfc->data & rxh_l4) == rxh_l4) + rss_cfg |= NIX_FLOW_KEY_TYPE_TCP; + else + rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if ((nfc->data & rxh_l4) == rxh_l4) + rss_cfg |= NIX_FLOW_KEY_TYPE_UDP; + else + rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP; + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + if ((nfc->data & rxh_l4) == rxh_l4) + rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP; + else + rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP; + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; + break; + default: + return -EINVAL; + } + + rss->flowkey_cfg = rss_cfg; + otx2_set_flowkey_cfg(pfvf); + return 0; +} + +static int otx2_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc, u32 *rules) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_GRXRINGS: + nfc->data = pfvf->hw.rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + nfc->rule_cnt = pfvf->nr_flows; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + ret = otx2_get_all_flows(pfvf, nfc, rules); + break; + case ETHTOOL_GRXFH: + return otx2_get_rss_hash_opts(pfvf, nfc); + default: + break; + } + return ret; +} + +int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, + struct npc_install_flow_req *req) +{ + struct ethtool_tcpip4_spec *l4_mask = &fsp->m_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *l4_hdr = &fsp->h_u.tcp_ip4_spec; + struct ethhdr *eth_mask = &fsp->m_u.ether_spec; + struct ethhdr *eth_hdr = &fsp->h_u.ether_spec; + struct flow_msg *pmask = &req->mask; + struct flow_msg *pkt = &req->packet; + u32 flow_type; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + switch (flow_type) { + /* bits not set in mask are don't care */ + case ETHER_FLOW: + if (!is_zero_ether_addr(eth_mask->h_source)) { + ether_addr_copy(pkt->smac, eth_hdr->h_source); + ether_addr_copy(pmask->smac, eth_mask->h_source); + req->features |= BIT_ULL(NPC_SMAC); + } + if (!is_zero_ether_addr(eth_mask->h_dest)) { + ether_addr_copy(pkt->dmac, eth_hdr->h_dest); + ether_addr_copy(pmask->dmac, eth_mask->h_dest); + req->features |= BIT_ULL(NPC_DMAC); + } + if (eth_mask->h_proto) { + memcpy(&pkt->etype, &eth_hdr->h_proto, + sizeof(pkt->etype)); + memcpy(&pmask->etype, &eth_mask->h_proto, + sizeof(pmask->etype)); + req->features |= BIT_ULL(NPC_ETYPE); + } + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + if (l4_mask->ip4src) { + memcpy(&pkt->ip4src, &l4_hdr->ip4src, + sizeof(pkt->ip4src)); + memcpy(&pmask->ip4src, &l4_mask->ip4src, + sizeof(pmask->ip4src)); + req->features |= BIT_ULL(NPC_SIP_IPV4); + } + if (l4_mask->ip4dst) { + memcpy(&pkt->ip4dst, &l4_hdr->ip4dst, + sizeof(pkt->ip4dst)); + memcpy(&pmask->ip4dst, &l4_mask->ip4dst, + sizeof(pmask->ip4dst)); + req->features |= BIT_ULL(NPC_DIP_IPV4); + } + if (l4_mask->psrc) { + memcpy(&pkt->sport, &l4_hdr->psrc, sizeof(pkt->sport)); + memcpy(&pmask->sport, &l4_mask->psrc, + sizeof(pmask->sport)); + if (flow_type == UDP_V4_FLOW) + req->features |= BIT_ULL(NPC_SPORT_UDP); + else + req->features |= BIT_ULL(NPC_SPORT_TCP); + } + if (l4_mask->pdst) { + memcpy(&pkt->dport, &l4_hdr->pdst, sizeof(pkt->dport)); + memcpy(&pmask->dport, &l4_mask->pdst, + sizeof(pmask->dport)); + if (flow_type == UDP_V4_FLOW) + req->features |= BIT_ULL(NPC_DPORT_UDP); + else + 
req->features |= BIT_ULL(NPC_DPORT_TCP); + } + break; + default: + return -ENOTSUPP; + } + if (fsp->flow_type & FLOW_EXT) { + if (fsp->m_ext.vlan_etype) + return -EINVAL; + if (fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)) + return -EINVAL; + if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci, + sizeof(pkt->vlan_tci)); + memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci, + sizeof(pmask->vlan_tci)); + req->features |= BIT_ULL(NPC_OUTER_VID); + } + /* Not Drop/Direct to queue but use action in default entry */ + if (fsp->m_ext.data[1] && + fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION)) + req->op = NIX_RX_ACTION_DEFAULT; + } + if (fsp->flow_type & FLOW_MAC_EXT && + !is_zero_ether_addr(fsp->m_ext.h_dest)) { + ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest); + ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest); + req->features |= BIT_ULL(NPC_DMAC); + } + + if (!req->features) + return -ENOTSUPP; + + return 0; +} + +static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) +{ + bool ntuple = !!(dev->features & NETIF_F_NTUPLE); + struct otx2_nic *pfvf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_SRXFH: + ret = otx2_set_rss_hash_opts(pfvf, nfc); + break; + case ETHTOOL_SRXCLSRLINS: + if (netif_running(dev) && ntuple) + ret = otx2_add_flow(pfvf, &nfc->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + if (netif_running(dev) && ntuple) + ret = otx2_remove_flow(pfvf, nfc->fs.location); + break; + default: + break; + } + + return ret; +} + +static int otx2vf_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc, u32 *rules) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_GRXRINGS: + nfc->data = pfvf->hw.rx_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + return otx2_get_rss_hash_opts(pfvf, nfc); + default: + break; + } + return ret; +} + +static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_SRXFH: + ret = otx2_set_rss_hash_opts(pfvf, nfc); + break; + default: + break; + } + + return ret; +} + +static u32 otx2_get_rxfh_key_size(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + + return sizeof(rss->key); +} + +static u32 otx2_get_rxfh_indir_size(struct net_device *dev) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + + return pfvf->hw.rss_info.rss_size; +} + +/* Get RSS configuration*/ +static int otx2_get_rxfh(struct net_device *dev, u32 *indir, + u8 *hkey, u8 *hfunc) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + int idx; + + if (indir) { + for (idx = 0; idx < rss->rss_size; idx++) + indir[idx] = rss->ind_tbl[idx]; + } + + if (hkey) + memcpy(hkey, rss->key, sizeof(rss->key)); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +/* Configure RSS table and hash key*/ +static int otx2_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *hkey, const u8 hfunc) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss = &pfvf->hw.rss_info; + int idx; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!rss->enable) { + netdev_err(dev, "RSS is disabled, cannot change settings\n"); + return -EIO; + } + + if (indir) { + for (idx = 0; idx < 
rss->rss_size; idx++) + rss->ind_tbl[idx] = indir[idx]; + } + + if (hkey) { + memcpy(rss->key, hkey, sizeof(rss->key)); + otx2_set_rss_key(pfvf); + } + + otx2_set_rss_table(pfvf); + return 0; +} + +static int otx2_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + if (!pfvf->ptp) + return ethtool_op_get_ts_info(netdev, info); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = otx2_ptp_clock_index(pfvf); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + +static u32 otx2_get_msglevel(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + return pfvf->msg_enable; +} + +static void otx2_set_msglevel(struct net_device *netdev, u32 val) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + pfvf->msg_enable = val; +} + +static void otx2_get_fec_info(u64 index, int mode, struct ethtool_link_ksettings + *link_ksettings) +{ + switch (index) { + case OTX2_FEC_NONE: + if (mode) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + FEC_NONE); + else + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, + FEC_NONE); + break; + case OTX2_FEC_BASER: + if (mode) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + FEC_BASER); + else + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, + FEC_BASER); + break; + case OTX2_FEC_RS: + if (mode) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + FEC_RS); + else + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, + FEC_RS); + break; + case OTX2_FEC_BASER | OTX2_FEC_RS: + if (mode) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + FEC_BASER); + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + FEC_RS); + } else { + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, + FEC_BASER); + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, + FEC_RS); + } + + break; + } +} + +static void otx2_get_link_mode_info(u64 index, int mode, + struct ethtool_link_ksettings + *link_ksettings) +{ + u64 ethtool_link_mode = 0; + int bit_position = 0; + u64 link_modes = 0; + + int cgx_link_mode[29] = {0, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + 
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT + }; + link_modes = index & OTX2_ETHTOOL_SUPPORTED_MODES; + + for (bit_position = 0; link_modes; bit_position++, link_modes >>= 1) { + if (!(link_modes & 1)) + continue; + + if (bit_position == 0) + ethtool_link_mode = 0x3F; + + ethtool_link_mode |= 1ULL << cgx_link_mode[bit_position]; + if (mode) + *link_ksettings->link_modes.advertising |= + ethtool_link_mode; + else + *link_ksettings->link_modes.supported |= + ethtool_link_mode; + } +} + +static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf) +{ + struct cgx_fw_data *rsp = NULL; + struct msg_req *req; + int err = 0; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return ERR_PTR(-ENOMEM); + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (!err) { + rsp = (struct cgx_fw_data *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + } else { + rsp = ERR_PTR(err); + } + + otx2_mbox_unlock(&pfvf->mbox); + return rsp; +} + +static int otx2_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_fw_data *rsp; + + rsp = otx2_get_fwdata(pfvf); + if (IS_ERR(rsp)) + return PTR_ERR(rsp); + + modinfo->type = rsp->fwdata.sfp_eeprom.sff_id; + modinfo->eeprom_len = SFP_EEPROM_SIZE; + return 0; +} + +static int otx2_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_fw_data *rsp; + + rsp = otx2_get_fwdata(pfvf); + if (IS_ERR(rsp)) + return PTR_ERR(rsp); + + memcpy(data, &rsp->fwdata.sfp_eeprom.buf, ee->len); + + return 0; +} + +static int otx2_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_fw_data *rsp = NULL; + u32 supported = 0; + + cmd->base.duplex = pfvf->linfo.full_duplex; + cmd->base.speed = pfvf->linfo.speed; + cmd->base.autoneg = pfvf->linfo.an; + cmd->base.port = pfvf->linfo.port; + + rsp = otx2_get_fwdata(pfvf); + if (IS_ERR(rsp)) + return PTR_ERR(rsp); + + if (rsp->fwdata.supported_an) + supported |= SUPPORTED_Autoneg; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes, 1, cmd); + otx2_get_fec_info(rsp->fwdata.advertised_fec, 1, cmd); + + otx2_get_link_mode_info(rsp->fwdata.supported_link_modes, 0, cmd); + otx2_get_fec_info(rsp->fwdata.supported_fec, 0, cmd); + + return 0; +} + +static int otx2_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + unsigned long advertising = 0; + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_set_link_mode_req *req; + struct cgx_set_link_mode_rsp *rsp; + int err = 0; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -EAGAIN; + } + + advertising = (*cmd->link_modes.advertising) & (OTX2_ETHTOOL_ALL_MODES); + if (!(advertising & (advertising - 1)) && + (advertising <= BIT_ULL(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT))) { + req->args.mode = advertising; + } else { + otx2_mbox_unlock(&pfvf->mbox); + return -EINVAL; + } + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (!err) { + rsp = (struct cgx_set_link_mode_rsp *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (rsp->status) + err = rsp->status; + 
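/* a non-zero rsp->status means the firmware + * rejected the requested link mode + */ + 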
} + otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +static u32 otx2_get_link(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + if (is_otx2_lbkvf(pfvf->pdev)) + return 1; + return pfvf->linfo.link_up; +} + +static int otx2_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_fw_data *rsp; + int fec[] = { + ETHTOOL_FEC_OFF, + ETHTOOL_FEC_BASER, + ETHTOOL_FEC_RS, + ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS}; +#define FEC_MAX_INDEX 3 + if (pfvf->linfo.fec < FEC_MAX_INDEX) + fecparam->active_fec = fec[pfvf->linfo.fec]; + + rsp = otx2_get_fwdata(pfvf); + if (IS_ERR(rsp)) + return PTR_ERR(rsp); + + if (rsp->fwdata.supported_fec <= FEC_MAX_INDEX) { + if (!rsp->fwdata.supported_fec) + fecparam->fec = ETHTOOL_FEC_NONE; + else + fecparam->fec = fec[rsp->fwdata.supported_fec]; + } + return 0; +} + +static int otx2_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct fec_mode *req, *rsp; + int err = 0, fec = 0; + + switch (fecparam->fec) { + case ETHTOOL_FEC_OFF: + fec = OTX2_FEC_NONE; + break; + case ETHTOOL_FEC_RS: + fec = OTX2_FEC_RS; + break; + case ETHTOOL_FEC_BASER: + fec = OTX2_FEC_BASER; + break; + default: + fec = OTX2_FEC_NONE; + break; + } + + if (fec == pfvf->linfo.fec) + return 0; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox); + if (!req) { + err = -EAGAIN; + goto end; + } + req->fec = fec; + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto end; + + rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (rsp->fec >= 0) { + pfvf->linfo.fec = rsp->fec; + pfvf->hw.cgx_fec_corr_blks = 0; + pfvf->hw.cgx_fec_uncorr_blks = 0; + + } else { + err = rsp->fec; + } + +end: otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +static u32 otx2_get_priv_flags(struct net_device *netdev) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_fw_data *rsp; + + rsp = otx2_get_fwdata(pfvf); + + if (IS_ERR(rsp)) { + pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4; + } else { + if (rsp->fwdata.phy.misc.mod_type) + pfvf->ethtool_flags |= OTX2_PRIV_FLAG_PAM4; + else + pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4; + } + + return pfvf->ethtool_flags; +} + +static int otx2_set_phy_mod_type(struct net_device *netdev, bool enable) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct cgx_phy_mod_type *req; + struct cgx_fw_data *fwd; + int rc = -EAGAIN; + + fwd = otx2_get_fwdata(pfvf); + if (IS_ERR(fwd)) + return -EAGAIN; + + /* ret here if phy does not support this feature */ + if (!fwd->fwdata.phy.misc.can_change_mod_type) + return -EOPNOTSUPP; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_cgx_set_phy_mod_type(&pfvf->mbox); + if (!req) + goto end; + + req->mod = enable; + + if (!otx2_sync_mbox_msg(&pfvf->mbox)) + rc = 0; +end: + otx2_mbox_unlock(&pfvf->mbox); + return rc; +} + +int otx2_set_npc_parse_mode(struct otx2_nic *pfvf) +{ + struct npc_set_pkind *req; + int rc = -EAGAIN; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_npc_set_pkind(&pfvf->mbox); + if (!req) + goto end; + + if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags)) + req->mode = OTX2_PRIV_FLAGS_HIGIG; + else if (OTX2_IS_EDSA_ENABLED(pfvf->ethtool_flags)) + req->mode = OTX2_PRIV_FLAGS_EDSA; + else + req->mode = OTX2_PRIV_FLAGS_DEFAULT; + + req->dir = PKIND_RX; + + /* req AF to change pkind on both the dir */ + if (req->mode == 
OTX2_PRIV_FLAGS_HIGIG) + req->dir |= PKIND_TX; + + if (!otx2_sync_mbox_msg(&pfvf->mbox)) + rc = 0; +end: + otx2_mbox_unlock(&pfvf->mbox); + return rc; +} + +static int otx2_enable_addl_header(struct net_device *netdev, int bitpos, + u32 len, bool enable) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + bool if_up = netif_running(netdev); + + if (enable) { + pfvf->ethtool_flags |= BIT(bitpos); + } else { + pfvf->ethtool_flags &= ~BIT(bitpos); + len = 0; + } + + if (if_up) + otx2_stop(netdev); + + /* Update max FRS so that additional hdrs are considered */ + pfvf->addl_mtu = len; + + /* In case HIGIG2 mode is set, the packet will have 16 bytes of + * extra header at the start which the stack does not need. + */ + if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags)) + pfvf->xtra_hdr = 16; + else + pfvf->xtra_hdr = 0; + + /* NPC parse mode will be updated here */ + if (if_up) + otx2_open(netdev); + + return 0; +} + +static int otx2_set_priv_flags(struct net_device *netdev, u32 new_flags) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + bool enable = false; + int bitnr, rc = 0; + u32 chg_flags; + + /* Get latest PAM4 settings */ + otx2_get_priv_flags(netdev); + + chg_flags = new_flags ^ pfvf->ethtool_flags; + if (!chg_flags) + return 0; + + /* Some are mutually exclusive, so allow only one change at a time */ + if (hweight32(chg_flags) != 1) + return -EINVAL; + + bitnr = ffs(chg_flags) - 1; + if (new_flags & BIT(bitnr)) + enable = true; + + switch (BIT(bitnr)) { + case OTX2_PRIV_FLAG_PAM4: + rc = otx2_set_phy_mod_type(netdev, enable); + break; + case OTX2_PRIV_FLAG_EDSA_HDR: + /* HIGIG & EDSA are mutually exclusive */ + if (enable && OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags)) + return -EINVAL; + return otx2_enable_addl_header(netdev, bitnr, + OTX2_EDSA_HDR_LEN, enable); + break; + case OTX2_PRIV_FLAG_HIGIG2_HDR: + if (enable && OTX2_IS_EDSA_ENABLED(pfvf->ethtool_flags)) + return -EINVAL; + return otx2_enable_addl_header(netdev, bitnr, + OTX2_HIGIG2_HDR_LEN, enable); + break; + default: + break; + } + + /* save the change */ + if (!rc) { + if (enable) + pfvf->ethtool_flags |= BIT(bitnr); + else + pfvf->ethtool_flags &= ~BIT(bitnr); + } + + return rc; +} + +static const struct ethtool_ops otx2_ethtool_ops = { + .get_link = otx2_get_link, + .get_drvinfo = otx2_get_drvinfo, + .get_strings = otx2_get_strings, + .get_ethtool_stats = otx2_get_ethtool_stats, + .get_sset_count = otx2_get_sset_count, + .set_channels = otx2_set_channels, + .get_channels = otx2_get_channels, + .get_ringparam = otx2_get_ringparam, + .set_ringparam = otx2_set_ringparam, + .get_coalesce = otx2_get_coalesce, + .set_coalesce = otx2_set_coalesce, + .get_rxnfc = otx2_get_rxnfc, + .set_rxnfc = otx2_set_rxnfc, + .get_rxfh_key_size = otx2_get_rxfh_key_size, + .get_rxfh_indir_size = otx2_get_rxfh_indir_size, + .get_rxfh = otx2_get_rxfh, + .set_rxfh = otx2_set_rxfh, + .get_ts_info = otx2_get_ts_info, + .get_msglevel = otx2_get_msglevel, + .set_msglevel = otx2_set_msglevel, + .get_link_ksettings = otx2_get_link_ksettings, + .set_link_ksettings = otx2_set_link_ksettings, + .get_pauseparam = otx2_get_pauseparam, + .set_pauseparam = otx2_set_pauseparam, + .get_fecparam = otx2_get_fecparam, + .set_fecparam = otx2_set_fecparam, + .get_module_info = otx2_get_module_info, + .get_module_eeprom = otx2_get_module_eeprom, + .get_priv_flags = otx2_get_priv_flags, + .set_priv_flags = otx2_set_priv_flags, +}; + +void otx2_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &otx2_ethtool_ops; +} + +/* VF's ethtool APIs */ +static void 
otx2vf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct otx2_nic *vf = netdev_priv(netdev); + + strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VF_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info)); +} + +static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct otx2_nic *vf = netdev_priv(netdev); + int stats; + + if (sset != ETH_SS_STATS) + return; + + for (stats = 0; stats < otx2_n_dev_stats; stats++) { + memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + otx2_get_qset_strings(vf, &data, 0); +} + +static void otx2vf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct otx2_nic *vf = netdev_priv(netdev); + int stat; + + otx2_get_dev_stats(vf); + + for (stat = 0; stat < otx2_n_dev_stats; stat++) { + *data = ((u64 *)&vf->hw.dev_stats)[otx2_dev_stats[stat].index]; + data++; + } + + otx2_get_qset_stats(vf, stats, &data); +} + +static int otx2vf_get_sset_count(struct net_device *netdev, int sset) +{ + struct otx2_nic *vf = netdev_priv(netdev); + + if (sset != ETH_SS_STATS) + return -EINVAL; + + return otx2_n_dev_stats + + otx2_n_queue_stats * (vf->hw.rx_queues + vf->hw.tx_queues); +} + +static int otx2vf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + if (is_otx2_lbkvf(pfvf->pdev)) { + cmd->base.port = PORT_OTHER; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.speed = SPEED_100000; + } else { + return otx2_get_link_ksettings(netdev, cmd); + } + return 0; +} +static const struct ethtool_ops otx2vf_ethtool_ops = { + .get_link = otx2_get_link, + .get_drvinfo = otx2vf_get_drvinfo, + .get_strings = otx2vf_get_strings, + .get_ethtool_stats = otx2vf_get_ethtool_stats, + .get_sset_count = otx2vf_get_sset_count, + .set_channels = otx2_set_channels, + .get_channels = otx2_get_channels, + .get_rxnfc = otx2vf_get_rxnfc, + .set_rxnfc = otx2vf_set_rxnfc, + .get_rxfh_key_size = otx2_get_rxfh_key_size, + .get_rxfh_indir_size = otx2_get_rxfh_indir_size, + .get_rxfh = otx2_get_rxfh, + .set_rxfh = otx2_set_rxfh, + .get_ringparam = otx2_get_ringparam, + .set_ringparam = otx2_set_ringparam, + .get_coalesce = otx2_get_coalesce, + .set_coalesce = otx2_set_coalesce, + .get_pauseparam = otx2_get_pauseparam, + .set_pauseparam = otx2_set_pauseparam, + .get_link_ksettings = otx2vf_get_link_ksettings, +}; + +void otx2vf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &otx2vf_ethtool_ops; +} +EXPORT_SYMBOL(otx2vf_set_ethtool_ops); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c new file mode 100644 index 000000000000..39f439202235 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -0,0 +1,613 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physical Function ethernet driver + * + * Copyright (C) 2019 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "otx2_common.h" + +/* helper macros to support mcam flows */ +#define OTX2_MAX_NTUPLE_FLOWS 32 +#define OTX2_MAX_UNICAST_FLOWS 8 +#define OTX2_MAX_VLAN_FLOWS 1 + +enum mcam_offset { + NTUPLE_OFFSET = 0, + UNICAST_OFFSET = NTUPLE_OFFSET + OTX2_MAX_NTUPLE_FLOWS, + VLAN_OFFSET = UNICAST_OFFSET + OTX2_MAX_UNICAST_FLOWS, + OTX2_MCAM_COUNT = VLAN_OFFSET + OTX2_MAX_VLAN_FLOWS, +}; + +struct otx2_flow { + struct ethtool_rx_flow_spec flow_spec; + struct list_head list; + u32 location; + u16 entry; + bool is_vf; + int vf; +}; + +int otx2_mcam_flow_init(struct otx2_nic *pf) +{ + INIT_LIST_HEAD(&pf->flows); + + pf->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS; + + pf->flags |= (OTX2_FLAG_NTUPLE_SUPPORT | + OTX2_FLAG_UCAST_FLTR_SUPPORT | OTX2_FLAG_RX_VLAN_SUPPORT); + + pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table) + * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL); + + if (!pf->mac_table) + return -ENOMEM; + + /* register work queue for ndo callbacks */ + pf->otx2_ndo_wq = create_singlethread_workqueue("otx2_ndo_work_queue"); + if (!pf->otx2_ndo_wq) + return -ENOMEM; + INIT_WORK(&pf->otx2_rx_mode_work, otx2_do_set_rx_mode); + return 0; +} + +void otx2_mcam_flow_del(struct otx2_nic *pf) +{ + otx2_destroy_mcam_flows(pf); + if (pf->otx2_ndo_wq) { + flush_workqueue(pf->otx2_ndo_wq); + destroy_workqueue(pf->otx2_ndo_wq); + } +} + +static int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) +{ + netdev_features_t wanted = NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_CTAG_RX; + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + int i; + + otx2_mbox_lock(&pfvf->mbox); + if (pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC) { + otx2_mbox_unlock(&pfvf->mbox); + return 0; + } + + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + req->contig = false; + req->count = OTX2_MCAM_COUNT; + + /* Send message to AF */ + if (otx2_sync_mbox_msg(&pfvf->mbox)) { + otx2_mbox_unlock(&pfvf->mbox); + return -EINVAL; + } + + rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp + (&pfvf->mbox.mbox, 0, &req->hdr); + + if (rsp->count != req->count) { + netdev_info(pfvf->netdev, "number of rules truncated to %d\n", + rsp->count); + netdev_info(pfvf->netdev, + "Disabling RX VLAN offload due to non-availability of MCAM space\n"); + /* support only ntuples here */ + pfvf->ntuple_max_flows = rsp->count; + pfvf->netdev->priv_flags &= ~IFF_UNICAST_FLT; + pfvf->flags &= ~OTX2_FLAG_UCAST_FLTR_SUPPORT; + pfvf->flags &= ~OTX2_FLAG_RX_VLAN_SUPPORT; + pfvf->netdev->features &= ~wanted; + pfvf->netdev->hw_features &= ~wanted; + } + + for (i = 0; i < rsp->count; i++) + pfvf->entry_list[i] = rsp->entry_list[i]; + + pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; + otx2_mbox_unlock(&pfvf->mbox); + + return 0; +} + +/* On success adds mcam entry + * On failure enable promisous mode + */ +static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) +{ + struct npc_install_flow_req *req; + int err, i; + + if (!(pf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) { + err = otx2_alloc_mcam_entries(pf); + if (err) + return err; + } + + if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) + return -ENOMEM; + + /* dont have free mcam entries or uc list is greater than alloted */ + if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS) + return -ENOMEM; + + otx2_mbox_lock(&pf->mbox); + req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); + if (!req) { + otx2_mbox_unlock(&pf->mbox); + return -ENOMEM; + } + + /* unicast offset 
starts with 32 0..31 for ntuple */ + for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + if (pf->mac_table[i].inuse) + continue; + ether_addr_copy(pf->mac_table[i].addr, mac); + pf->mac_table[i].inuse = true; + pf->mac_table[i].mcam_entry = + pf->entry_list[i + UNICAST_OFFSET]; + req->entry = pf->mac_table[i].mcam_entry; + break; + } + + ether_addr_copy(req->packet.dmac, mac); + u64_to_ether_addr(0xffffffffffffull, req->mask.dmac); + req->features = BIT_ULL(NPC_DMAC); + req->channel = pf->hw.rx_chan_base; + req->intf = NIX_INTF_RX; + req->op = NIX_RX_ACTION_DEFAULT; + req->set_cntr = 1; + + err = otx2_sync_mbox_msg(&pf->mbox); + otx2_mbox_unlock(&pf->mbox); + + return err; +} + +int otx2_add_macfilter(struct net_device *netdev, const u8 *mac) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int err; + + err = otx2_do_add_macfilter(pf, mac); + if (err) { + netdev->flags |= IFF_PROMISC; + return err; + } + return 0; +} + +static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac, + int *mcam_entry) +{ + int i; + + for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + if (!pf->mac_table[i].inuse) + continue; + + if (ether_addr_equal(pf->mac_table[i].addr, mac)) { + *mcam_entry = pf->mac_table[i].mcam_entry; + pf->mac_table[i].inuse = false; + return true; + } + } + return false; +} + +int otx2_del_macfilter(struct net_device *netdev, const u8 *mac) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct npc_delete_flow_req *req; + int err, mcam_entry; + + /* check does mcam entry exists for given mac */ + if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry)) + return 0; + + otx2_mbox_lock(&pf->mbox); + req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); + if (!req) { + otx2_mbox_unlock(&pf->mbox); + return -ENOMEM; + } + req->entry = mcam_entry; + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pf->mbox); + otx2_mbox_unlock(&pf->mbox); + + return err; +} + +static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location) +{ + struct otx2_flow *iter; + + list_for_each_entry(iter, &pfvf->flows, list) { + if (iter->location == location) + return iter; + } + + return NULL; +} + +static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) +{ + struct list_head *head = &pfvf->flows; + struct otx2_flow *iter; + + list_for_each_entry(iter, &pfvf->flows, list) { + if (iter->location > flow->location) + break; + head = &iter->list; + } + + list_add(&flow->list, head); +} + +int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, + u32 location) +{ + struct otx2_flow *iter; + + if (location >= pfvf->ntuple_max_flows) + return -EINVAL; + + list_for_each_entry(iter, &pfvf->flows, list) { + if (iter->location == location) { + nfc->fs = iter->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, + u32 *rule_locs) +{ + u32 location = 0; + int idx = 0; + int err = 0; + + nfc->data = pfvf->ntuple_max_flows; + while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) { + err = otx2_get_flow(pfvf, nfc, location); + if (!err) + rule_locs[idx++] = location; + location++; + } + + return err; +} + +static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) +{ + u64 ring_cookie = flow->flow_spec.ring_cookie; + struct npc_install_flow_req *req; + int err, vf = 0; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + err = 
otx2_prepare_flow_request(&flow->flow_spec, req); + if (err) { + /* free the allocated msg above */ + otx2_mbox_reset(&pfvf->mbox.mbox, 0); + otx2_mbox_unlock(&pfvf->mbox); + return err; + } + + req->entry = flow->entry; + req->intf = NIX_INTF_RX; + req->set_cntr = 1; + req->channel = pfvf->hw.rx_chan_base; + if (ring_cookie == RX_CLS_FLOW_DISC) { + req->op = NIX_RX_ACTIONOP_DROP; + } else { + /* change to unicast only if action of default entry is not + * requested by user + */ + if (req->op != NIX_RX_ACTION_DEFAULT) + req->op = NIX_RX_ACTIONOP_UCAST; + req->index = ethtool_get_flow_spec_ring(ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(ring_cookie); + if (vf > pci_num_vf(pfvf->pdev)) { + otx2_mbox_unlock(&pfvf->mbox); + return -EINVAL; + } + } + + /* ethtool ring_cookie has (VF + 1) for VF */ + if (vf) { + req->vf = vf; + flow->is_vf = true; + flow->vf = vf; + } + + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp) +{ + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + struct otx2_flow *flow; + bool new = false; + int err; + + if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) { + err = otx2_alloc_mcam_entries(pfvf); + if (err) + return err; + } + + if (fsp->location >= pfvf->ntuple_max_flows) + return -EINVAL; + + flow = otx2_find_flow(pfvf, fsp->location); + if (!flow) { + flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + if (!flow) + return -ENOMEM; + flow->location = fsp->location; + flow->entry = pfvf->entry_list[flow->location]; + new = true; + } + /* struct copy */ + flow->flow_spec = *fsp; + + err = otx2_add_flow_msg(pfvf, flow); + if (err) { + if (new) + kfree(flow); + return err; + } + + /* add the new flow installed to list */ + if (new) { + otx2_add_flow_to_list(pfvf, flow); + pfvf->nr_flows++; + } + + return 0; +} + +static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all) +{ + struct npc_delete_flow_req *req; + int err; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + req->entry = entry; + if (all) + req->all = 1; + + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) +{ + struct otx2_flow *flow; + int err; + + if (location >= pfvf->ntuple_max_flows) + return -EINVAL; + + flow = otx2_find_flow(pfvf, location); + if (!flow) + return -ENOENT; + + err = otx2_remove_flow_msg(pfvf, flow->entry, false); + if (err) + return err; + + list_del(&flow->list); + kfree(flow); + pfvf->nr_flows--; + + return 0; +} + +int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf) +{ + struct npc_delete_flow_req *req; + struct otx2_flow *iter, *tmp; + int err; + + if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) + return 0; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + req->start = pfvf->entry_list[NTUPLE_OFFSET]; + req->end = pfvf->entry_list[NTUPLE_OFFSET + + pfvf->ntuple_max_flows - 1]; + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + + list_for_each_entry_safe(iter, tmp, &pfvf->flows, list) { + list_del(&iter->list); + kfree(iter); + pfvf->nr_flows--; + 
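/* the MCAM entries themselves stay allocated and are + * freed back to the AF only in otx2_destroy_mcam_flows() + */ + 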
} + return err; +} + +int otx2_destroy_mcam_flows(struct otx2_nic *pfvf) +{ + struct npc_mcam_free_entry_req *req; + struct otx2_flow *iter, *tmp; + int err; + + if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) + return 0; + + /* remove all flows */ + err = otx2_remove_flow_msg(pfvf, 0, true); + if (err) + return err; + + list_for_each_entry_safe(iter, tmp, &pfvf->flows, list) { + list_del(&iter->list); + kfree(iter); + pfvf->nr_flows--; + } + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + req->all = 1; + /* Send message to AF to free MCAM entries */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + otx2_mbox_unlock(&pfvf->mbox); + return err; + } + + pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC; + otx2_mbox_unlock(&pfvf->mbox); + + return 0; +} + +int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf) +{ + struct npc_install_flow_req *req; + int err; + + if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) + return -ENOMEM; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + req->entry = pfvf->entry_list[VLAN_OFFSET]; + req->intf = NIX_INTF_RX; + ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr); + u64_to_ether_addr(0xffffffffffffull, req->mask.dmac); + req->channel = pfvf->hw.rx_chan_base; + req->op = NIX_RX_ACTION_DEFAULT; + req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC); + req->vtag0_valid = true; + req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0; + + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf) +{ + struct npc_delete_flow_req *req; + int err; + + otx2_mbox_lock(&pfvf->mbox); + req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + req->entry = pfvf->entry_list[VLAN_OFFSET]; + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + otx2_mbox_unlock(&pfvf->mbox); + return err; +} + +int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable) +{ + struct nix_vtag_config *req; + struct mbox_msghdr *rsp_hdr; + int err; + + if (!(pf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) { + err = otx2_alloc_mcam_entries(pf); + if (err) + return err; + } + + /* Dont have enough mcam entries */ + if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)) + return -ENOMEM; + + if (enable) { + err = otx2_install_rxvlan_offload_flow(pf); + if (err) + return err; + } else { + err = otx2_delete_rxvlan_offload_flow(pf); + if (err) + return err; + } + + otx2_mbox_lock(&pf->mbox); + req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); + if (!req) { + otx2_mbox_unlock(&pf->mbox); + return -ENOMEM; + } + + /* config strip, capture and size */ + req->vtag_size = VTAGSIZE_T4; + req->cfg_type = 1; /* rx vlan cfg */ + req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0; + req->rx.strip_vtag = enable; + req->rx.capture_vtag = enable; + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) { + otx2_mbox_unlock(&pf->mbox); + return err; + } + + rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp_hdr)) { + otx2_mbox_unlock(&pf->mbox); + return PTR_ERR(rsp_hdr); + } + + otx2_mbox_unlock(&pf->mbox); + return rsp_hdr->rc; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c new file mode 
100644 index 000000000000..0698e2a76434 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -0,0 +1,2548 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physical Function ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/of.h> +#include <linux/if_vlan.h> +#include <net/ip.h> +#include <linux/iommu.h> + +#include "otx2_reg.h" +#include "otx2_common.h" +#include "otx2_txrx.h" +#include "otx2_struct.h" +#include "otx2_ptp.h" + +#define DRV_NAME "octeontx2-nicpf" +#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver" +#define DRV_VERSION "1.0" + +/* Supported devices */ +static const struct pci_device_id otx2_pf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, otx2_pf_id_table); + +enum { + TYPE_PFAF, + TYPE_PFVF, +}; + +static int otx2_change_mtu(struct net_device *netdev, int new_mtu) +{ + bool if_up = netif_running(netdev); + int err = 0; + + if (if_up) + otx2_stop(netdev); + + netdev_info(netdev, "Changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (if_up) + err = otx2_open(netdev); + + return err; +} + +static void otx2_disable_flr_me_intr(struct otx2_nic *pf) +{ + int irq, vfs = pf->total_vfs; + + /* Disable VFs ME interrupts */ + otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); + irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0); + free_irq(irq, pf); + + /* Disable VFs FLR interrupts */ + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); + irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0); + free_irq(irq, pf); + + if (vfs <= 64) + return; + + otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1); + free_irq(irq, pf); + + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1); + free_irq(irq, pf); +} + +static void otx2_flr_wq_destroy(struct otx2_nic *pf) +{ + if (!pf->flr_wq) + return; + flush_workqueue(pf->flr_wq); + destroy_workqueue(pf->flr_wq); + pf->flr_wq = NULL; +} + +static void otx2_flr_handler(struct work_struct *work) +{ + struct flr_work *flrwork = container_of(work, struct flr_work, work); + struct otx2_nic *pf = flrwork->pf; + struct msg_req *req; + int vf, reg = 0; + + vf = flrwork - pf->flr_wrk; + + otx2_mbox_lock(&pf->mbox); + req = otx2_mbox_alloc_msg_vf_flr(&pf->mbox); + if (!req) { + otx2_mbox_unlock(&pf->mbox); + return; + } + req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK; + req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK; + + if (!otx2_sync_mbox_msg(&pf->mbox)) { + if (vf >= 64) { + reg = 1; + vf = vf - 64; + } + /* clear transaction pending bit */ + otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); + } + + otx2_mbox_unlock(&pf->mbox); +} + +static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = (struct otx2_nic *)pf_irq; + int
reg, dev, vf, start_vf, num_reg = 1; + u64 intr; + + if (pf->total_vfs > 64) + num_reg = 2; + + for (reg = 0; reg < num_reg; reg++) { + intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg)); + if (!intr) + continue; + start_vf = 64 * reg; + for (vf = 0; vf < 64; vf++) { + if (!(intr & BIT_ULL(vf))) + continue; + dev = vf + start_vf; + queue_work(pf->flr_wq, &pf->flr_wrk[dev].work); + /* Clear interrupt */ + otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); + /* Disable the interrupt */ + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg), + BIT_ULL(vf)); + } + } + return IRQ_HANDLED; +} + +static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = (struct otx2_nic *)pf_irq; + int vf, reg, num_reg = 1; + u64 intr; + + if (pf->total_vfs > 64) + num_reg = 2; + + for (reg = 0; reg < num_reg; reg++) { + intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg)); + if (!intr) + continue; + for (vf = 0; vf < 64; vf++) { + if (!(intr & BIT_ULL(vf))) + continue; + /* clear trpend bit */ + otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); + /* clear interrupt */ + otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf)); + } + } + return IRQ_HANDLED; +} + +static int otx2_register_flr_me_intr(struct otx2_nic *pf) +{ + struct otx2_hw *hw = &pf->hw; + int vfs = pf->total_vfs; + char *irq_name; + int ret; + + /* Register ME interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); + ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), + otx2_pf_me_intr_handler, 0, irq_name, pf); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for ME\n"); + } + + /* Register FLR interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); + ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), + otx2_pf_flr_intr_handler, 0, irq_name, pf); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for FLR\n"); + return ret; + } + + if (pf->total_vfs > 64) { + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1", + rvu_get_pf(pf->pcifunc)); + ret = request_irq(pci_irq_vector + (pf->pdev, RVU_PF_INT_VEC_VFME1), + otx2_pf_me_intr_handler, 0, irq_name, pf); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for ME1\n"); + } + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1", + rvu_get_pf(pf->pcifunc)); + ret = request_irq(pci_irq_vector + (pf->pdev, RVU_PF_INT_VEC_VFFLR1), + otx2_pf_flr_intr_handler, 0, irq_name, pf); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for FLR1\n"); + return ret; + } + } + + /* Enable ME interrupt for all VFs */ + otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(vfs)); + otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* Enable FLR interrupt for all VFs */ + otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + if (pf->total_vfs > 64) { + vfs = pf->total_vfs - 64 - 1; + + otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(vfs)); + otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs)); + + otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs)); + otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs)); + } + return 0; +} + +static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) +{ + int vf; +
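+ /* One work item per VF: the FLR interrupt handler queues the + * matching flr_work, which sends a VF_FLR mailbox request to the AF + * and, on success, clears the transaction-pending bit and re-enables + * that VF's FLR interrupt. + */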
+ pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq", WQ_UNBOUND | WQ_HIGHPRI + | WQ_MEM_RECLAIM, 1); + if (!pf->flr_wq) + return -ENOMEM; + + pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs, + sizeof(struct flr_work), GFP_KERNEL); + if (!pf->flr_wrk) { + destroy_workqueue(pf->flr_wq); + return -ENOMEM; + } + + for (vf = 0; vf < num_vfs; vf++) { + pf->flr_wrk[vf].pf = pf; + INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler); + } + + return 0; +} + +static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr, int type) +{ + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + int i; + + for (i = first; i < mdevs; i++) { + /* 'intr' bit 0 corresponds to mbox device 'first' */ + if (!(intr & BIT_ULL(i - first))) + continue; + + mbox = &mw->mbox; + mdev = &mbox->dev[i]; + if (type == TYPE_PFAF) + otx2_sync_mbox_bbuf(mbox, i); + hdr = mdev->mbase + mbox->rx_start; + /* The hdr->num_msgs is set to zero immediately in the interrupt + * handler to ensure that it holds a correct value next time + * when the interrupt handler is called. + * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler + * and pf->mbox.up_num_msgs holds the data for use in + * pfaf_mbox_up_handler. + */ + if (hdr->num_msgs) { + mw[i].num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + if (type == TYPE_PFAF) + memset(mbox->hwbase + mbox->rx_start, 0, + ALIGN(sizeof(struct mbox_hdr), + sizeof(u64))); + + queue_work(mbox_wq, &mw[i].mbox_wrk); + } + + mbox = &mw->mbox_up; + mdev = &mbox->dev[i]; + if (type == TYPE_PFAF) + otx2_sync_mbox_bbuf(mbox, i); + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) { + mw[i].up_num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + if (type == TYPE_PFAF) + memset(mbox->hwbase + mbox->rx_start, 0, + ALIGN(sizeof(struct mbox_hdr), + sizeof(u64))); + + queue_work(mbox_wq, &mw[i].mbox_up_wrk); + } + } +} + +static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable); +static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable); + +static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev, + struct otx2_mbox *pfvf_mbox, void *bbuf_base, + int devid) +{ + struct otx2_mbox_dev *src_mdev = mdev; + int offset; + + /* Msgs are already copied, trigger VF's mbox irq */ + smp_wmb(); + + offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift); + writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset); + + /* Restore VF's mbox bounce buffer region address */ + src_mdev->mbase = bbuf_base; +} + +static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, + struct otx2_mbox *src_mbox, + int dir, int vf, int num_msgs) +{ + struct otx2_mbox_dev *src_mdev, *dst_mdev; + struct mbox_hdr *mbox_hdr; + struct mbox_hdr *req_hdr; + struct mbox *dst_mbox; + int dst_size, err; + + if (dir == MBOX_DIR_PFAF) { + /* Set VF's mailbox memory as PF's bounce buffer memory, so + * that explicit copying of VF's msgs to PF=>AF mbox region + * and AF=>PF responses to VF's mbox region can be avoided.
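+ * In effect the VF's mailbox pages are aliased as the PF's bounce + * buffer while the messages are forwarded; the real bounce buffer + * address is restored afterwards in otx2_forward_msg_pfvf().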
+ */ + src_mdev = &src_mbox->dev[vf]; + mbox_hdr = src_mbox->hwbase + + src_mbox->rx_start + (vf * MBOX_SIZE); + + dst_mbox = &pf->mbox; + dst_size = dst_mbox->mbox.tx_size - + ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); + /* Check if msgs fit into destination area */ + if (mbox_hdr->msg_size > dst_size) + return -EINVAL; + + dst_mdev = &dst_mbox->mbox.dev[0]; + + otx2_mbox_lock(&pf->mbox); + dst_mdev->mbase = src_mdev->mbase; + dst_mdev->msg_size = mbox_hdr->msg_size; + dst_mdev->num_msgs = num_msgs; + err = otx2_sync_mbox_msg(dst_mbox); + if (err) { + dev_warn(pf->dev, + "AF not responding to VF%d messages\n", vf); + /* restore PF mbase and exit */ + dst_mdev->mbase = pf->mbox.bbuf_base; + otx2_mbox_unlock(&pf->mbox); + return err; + } + /* At this point, all the VF messages sent to AF are acked + * with proper responses and responses are copied to VF + * mailbox, hence raise interrupt to VF. + */ + req_hdr = (struct mbox_hdr *)(dst_mdev->mbase + + dst_mbox->mbox.rx_start); + req_hdr->num_msgs = num_msgs; + + otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox, + pf->mbox.bbuf_base, vf); + otx2_mbox_unlock(&pf->mbox); + } else if (dir == MBOX_DIR_PFVF_UP) { + src_mdev = &src_mbox->dev[0]; + mbox_hdr = src_mbox->hwbase + src_mbox->rx_start; + req_hdr = (struct mbox_hdr *)(src_mdev->mbase + + src_mbox->rx_start); + req_hdr->num_msgs = num_msgs; + + dst_mbox = &pf->mbox_pfvf[0]; + dst_size = dst_mbox->mbox_up.tx_size - + ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); + /* Check if msgs fit into destination area */ + if (mbox_hdr->msg_size > dst_size) + return -EINVAL; + + dst_mdev = &dst_mbox->mbox_up.dev[vf]; + dst_mdev->mbase = src_mdev->mbase; + dst_mdev->msg_size = mbox_hdr->msg_size; + dst_mdev->num_msgs = mbox_hdr->num_msgs; + err = otx2_sync_mbox_up_msg(dst_mbox, vf); + if (err) { + dev_warn(pf->dev, + "VF%d is not responding to mailbox\n", vf); + return err; + } + } else if (dir == MBOX_DIR_VFPF_UP) { + req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase + + src_mbox->rx_start); + req_hdr->num_msgs = num_msgs; + otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf], + &pf->mbox.mbox_up, + pf->mbox_pfvf[vf].bbuf_base, + 0); + } + + return 0; +} + +static void otx2_pfvf_mbox_handler(struct work_struct *work) +{ + struct mbox_msghdr *msg = NULL; + int offset, vf_idx, id, err; + struct otx2_mbox_dev *mdev; + struct mbox_hdr *req_hdr; + struct otx2_mbox *mbox; + struct mbox *vf_mbox; + struct otx2_nic *pf; + + vf_mbox = container_of(work, struct mbox, mbox_wrk); + pf = vf_mbox->pfvf; + vf_idx = vf_mbox - pf->mbox_pfvf; + + mbox = &pf->mbox_pfvf[0].mbox; + mdev = &mbox->dev[vf_idx]; + req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + + offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < vf_mbox->num_msgs; id++) { + msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + + offset); + + if (msg->sig != OTX2_MBOX_REQ_SIG) + goto inval_msg; + + /* Set VF's number in each of the msgs */ + msg->pcifunc &= RVU_PFVF_FUNC_MASK; + msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK; + offset = msg->next_msgoff; + } + err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx, + vf_mbox->num_msgs); + if (err) + goto inval_msg; + return; + +inval_msg: + otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id); + otx2_mbox_msg_send(mbox, vf_idx); +} + +static void otx2_pfvf_mbox_up_handler(struct work_struct *work) +{ + struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk); + struct otx2_nic *pf = vf_mbox->pfvf; + struct otx2_mbox_dev *mdev; + int offset, id,
vf_idx = 0; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + struct otx2_mbox *mbox; + + vf_idx = vf_mbox - pf->mbox_pfvf; + mbox = &pf->mbox_pfvf[0].mbox_up; + mdev = &mbox->dev[vf_idx]; + + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < vf_mbox->up_num_msgs; id++) { + msg = mdev->mbase + offset; + + if (msg->id >= MBOX_MSG_MAX) { + dev_err(pf->dev, + "Mbox msg with unknown ID 0x%x\n", msg->id); + goto end; + } + + if (msg->sig != OTX2_MBOX_RSP_SIG) { + dev_err(pf->dev, + "Mbox msg with wrong signature %x, ID 0x%x\n", + msg->sig, msg->id); + goto end; + } + + switch (msg->id) { + case MBOX_MSG_CGX_LINK_EVENT: + break; + default: + if (msg->rc) + dev_err(pf->dev, + "Mbox msg response has err %d, ID 0x%x\n", + msg->rc, msg->id); + break; + } + +end: + offset = mbox->rx_start + msg->next_msgoff; + mdev->msgs_acked++; + } + + otx2_mbox_reset(mbox, vf_idx); +} + +static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); + int vfs = pf->total_vfs; + struct mbox *mbox; + u64 intr; + + mbox = pf->mbox_pfvf; + /* Handle VF interrupts */ + if (vfs > 64) { + intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1)); + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); + otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, + TYPE_PFVF); + vfs -= 64; + } + + intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr); + + otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); + + return IRQ_HANDLED; +} + +static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) +{ + void __iomem *hwbase; + struct mbox *mbox; + int err, vf; + u64 base; + + if (!numvfs) + return -EINVAL; + + pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs, + sizeof(struct mbox), GFP_KERNEL); + if (!pf->mbox_pfvf) + return -ENOMEM; + + pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox", + WQ_UNBOUND | WQ_HIGHPRI | + WQ_MEM_RECLAIM, 1); + if (!pf->mbox_pfvf_wq) + return -ENOMEM; + + base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR)); + hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); + + if (!hwbase) { + err = -ENOMEM; + goto free_wq; + } + + mbox = &pf->mbox_pfvf[0]; + err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, + MBOX_DIR_PFVF, numvfs); + if (err) + goto free_iomem; + + err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, + MBOX_DIR_PFVF_UP, numvfs); + if (err) + goto free_iomem; + + for (vf = 0; vf < numvfs; vf++) { + mbox->pfvf = pf; + INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler); + INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler); + mbox++; + } + + return 0; + +free_iomem: + if (hwbase) + iounmap(hwbase); +free_wq: + destroy_workqueue(pf->mbox_pfvf_wq); + return err; +} + +static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox_pfvf[0]; + + if (!mbox) + return; + + if (pf->mbox_pfvf_wq) { + flush_workqueue(pf->mbox_pfvf_wq); + destroy_workqueue(pf->mbox_pfvf_wq); + pf->mbox_pfvf_wq = NULL; + } + + if (mbox->mbox.hwbase) + iounmap(mbox->mbox.hwbase); + + otx2_mbox_destroy(&mbox->mbox); +} + +static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf) +{ + int bits; + + /* Clear PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); + + /* Enable PF <=> VF mailbox IRQ */ + bits = ((pf->total_vfs - 1) % 64); + 
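+ /* GENMASK_ULL(bits, 0) sets one enable bit per VF; register index 1 + * covers the VFs beyond the first 64. + */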
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), + GENMASK_ULL(bits, 0)); + + if (pf->total_vfs > 64) { + bits = pf->total_vfs - 64 - 1; + otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), + GENMASK_ULL(bits, 0)); + } +} + +static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf) +{ + int vector; + + /* Disable PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); + otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); + + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); + vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0); + free_irq(vector, pf); + + if (pf->total_vfs > 64) { + otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); + vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1); + free_irq(vector, pf); + } +} + +static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf) +{ + struct otx2_hw *hw = &pf->hw; + char *irq_name; + int err; + + /* Register MBOX0 interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; + if (pf->pcifunc) + snprintf(irq_name, NAME_SIZE, + "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); + else + snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); + err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), + otx2_pfvf_mbox_intr_handler, 0, irq_name, pf); + if (err) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFVF mbox0 irq\n"); + return err; + } + + if (pf->total_vfs > 64) { + /* Register MBOX1 interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; + if (pf->pcifunc) + snprintf(irq_name, NAME_SIZE, + "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); + else + snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); + err = request_irq(pci_irq_vector(pf->pdev, + RVU_PF_INT_VEC_VFPF_MBOX1), + otx2_pfvf_mbox_intr_handler, + 0, irq_name, pf); + if (err) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFVF mbox1 irq\n"); + return err; + } + } + + otx2_enable_pfvf_mbox_intr(pf); + + return 0; +} + +static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf, + struct mbox_msghdr *msg) +{ + int devid; + + if (msg->id >= MBOX_MSG_MAX) { + dev_err(pf->dev, + "Mbox msg with unknown ID 0x%x\n", msg->id); + return; + } + + if (msg->sig != OTX2_MBOX_RSP_SIG) { + dev_err(pf->dev, + "Mbox msg with wrong signature %x, ID 0x%x\n", + msg->sig, msg->id); + return; + } + + /* message response heading to a VF */ + devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; + if (devid) { + struct otx2_vf_config *config = &pf->vf_configs[devid - 1]; + struct delayed_work *dwork; + + switch (msg->id) { + case MBOX_MSG_NIX_LF_START_RX: + config->intf_down = false; + dwork = &config->link_event_work; + schedule_delayed_work(dwork, msecs_to_jiffies(100)); + break; + case MBOX_MSG_NIX_LF_STOP_RX: + config->intf_down = true; + break; + } + + return; + } + + switch (msg->id) { + case MBOX_MSG_READY: + pf->pcifunc = msg->pcifunc; + break; + case MBOX_MSG_MSIX_OFFSET: + mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg); + break; + case MBOX_MSG_NPA_LF_ALLOC: + mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_LF_ALLOC: + mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_TXSCH_ALLOC: + mbox_handler_nix_txsch_alloc(pf, + (struct nix_txsch_alloc_rsp *)msg); + break; + case MBOX_MSG_NIX_BP_ENABLE: + mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg); + break; + case MBOX_MSG_CGX_STATS: + mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg); + break; + case
MBOX_MSG_CGX_FEC_STATS: + mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg); + break; + default: + if (msg->rc) + dev_err(pf->dev, + "Mbox msg response has err %d, ID 0x%x\n", + msg->rc, msg->id); + break; + } +} + +static void otx2_pfaf_mbox_handler(struct work_struct *work) +{ + struct otx2_mbox_dev *mdev; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + struct otx2_mbox *mbox; + struct mbox *af_mbox; + struct otx2_nic *pf; + int offset, id; + + af_mbox = container_of(work, struct mbox, mbox_wrk); + mbox = &af_mbox->mbox; + mdev = &mbox->dev[0]; + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + pf = af_mbox->pfvf; + + for (id = 0; id < af_mbox->num_msgs; id++) { + msg = (struct mbox_msghdr *)(mdev->mbase + offset); + otx2_process_pfaf_mbox_msg(pf, msg); + offset = mbox->rx_start + msg->next_msgoff; + mdev->msgs_acked++; + } + + otx2_mbox_reset(mbox, 0); +} + +static void otx2_handle_link_event(struct otx2_nic *pf) +{ + struct cgx_link_user_info *linfo = &pf->linfo; + struct net_device *netdev = pf->netdev; + + pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name, + linfo->link_up ? "UP" : "DOWN", linfo->speed, + linfo->full_duplex ? "Full" : "Half"); + if (linfo->link_up) { + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } +} + +int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, + struct cgx_link_info_msg *msg, + struct msg_rsp *rsp) +{ + int i; + + pf->linfo = msg->link_info; + + /* notify VFs about link event */ + for (i = 0; i < pci_num_vf(pf->pdev); i++) { + struct otx2_vf_config *config = &pf->vf_configs[i]; + struct delayed_work *dwork = &config->link_event_work; + + if (config->intf_down) + continue; + + schedule_delayed_work(dwork, msecs_to_jiffies(100)); + } + + /* interface has not been fully configured yet */ + if (pf->flags & OTX2_FLAG_INTF_DOWN) + return 0; + + otx2_handle_link_event(pf); + return 0; +} + +int otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_nic *pf, + struct cgx_ptp_rx_info_msg *msg, + struct msg_rsp *rsp) +{ + int i; + + pf->ptp->ptp_en = msg->ptp_en; + + /* notify VFs about ptp event */ + for (i = 0; i < pci_num_vf(pf->pdev); i++) { + struct otx2_vf_config *config = &pf->vf_configs[i]; + struct delayed_work *dwork = &config->ptp_info_work; + + if (config->intf_down) + continue; + + schedule_delayed_work(dwork, msecs_to_jiffies(100)); + } + return 0; +} + +static int otx2_process_mbox_msg_up(struct otx2_nic *pf, + struct mbox_msghdr *req) +{ + /* Check if valid, if not reply with an invalid msg */ + if (req->sig != OTX2_MBOX_REQ_SIG) { + otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); + return -ENODEV; + } + + switch (req->id) { +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ + case _id: { \ + struct _rsp_type *rsp; \ + int err; \ + \ + rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ + &pf->mbox.mbox_up, 0, \ + sizeof(struct _rsp_type)); \ + if (!rsp) \ + return -ENOMEM; \ + \ + rsp->hdr.id = _id; \ + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ + rsp->hdr.pcifunc = 0; \ + rsp->hdr.rc = 0; \ + \ + err = otx2_mbox_up_handler_ ## _fn_name( \ + pf, (struct _req_type *)req, rsp); \ + return err; \ + } +MBOX_UP_CGX_MESSAGES +#undef M + break; + default: + otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); + return -ENODEV; + } + return 0; +} + +static void otx2_pfaf_mbox_up_handler(struct work_struct *work) +{ + struct mbox
*af_mbox = container_of(work, struct mbox, mbox_up_wrk); + struct otx2_mbox *mbox = &af_mbox->mbox_up; + struct otx2_mbox_dev *mdev = &mbox->dev[0]; + struct otx2_nic *pf = af_mbox->pfvf; + int offset, id, devid = 0; + struct mbox_hdr *rsp_hdr; + struct mbox_msghdr *msg; + + rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + + for (id = 0; id < af_mbox->up_num_msgs; id++) { + msg = (struct mbox_msghdr *)(mdev->mbase + offset); + + devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; + /* Skip processing VF's messages */ + if (!devid) + otx2_process_mbox_msg_up(pf, msg); + offset = mbox->rx_start + msg->next_msgoff; + } + if (devid) { + otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up, + MBOX_DIR_PFVF_UP, devid - 1, + af_mbox->up_num_msgs); + return; + } + + otx2_mbox_msg_send(mbox, 0); +} + +static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = (struct otx2_nic *)pf_irq; + struct mbox *mbox; + + /* Clear the IRQ */ + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); + + mbox = &pf->mbox; + otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF); + + return IRQ_HANDLED; +} + +static void otx2_disable_mbox_intr(struct otx2_nic *pf) +{ + int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); + + /* Disable AF => PF mailbox IRQ */ + otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); + free_irq(vector, pf); +} + +static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) +{ + struct otx2_hw *hw = &pf->hw; + struct msg_req *req; + char *irq_name; + int err; + + /* Register mailbox interrupt handler */ + irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox"); + err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), + otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); + if (err) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFAF mbox irq\n"); + return err; + } + + /* Enable mailbox interrupt for msgs coming from AF. + * First clear to avoid spurious interrupts, if any. + */ + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); + otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); + + if (!probe_af) + return 0; + + /* Check mailbox communication with AF */ + req = otx2_mbox_alloc_msg_ready(&pf->mbox); + if (!req) { + otx2_disable_mbox_intr(pf); + return -ENOMEM; + } + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) { + dev_warn(pf->dev, + "AF not responding to mailbox, deferring probe\n"); + otx2_disable_mbox_intr(pf); + return -EPROBE_DEFER; + } + + return 0; +} + +static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox; + + if (pf->mbox_wq) { + flush_workqueue(pf->mbox_wq); + destroy_workqueue(pf->mbox_wq); + pf->mbox_wq = NULL; + } + + if (mbox->mbox.hwbase) + iounmap((void __iomem *)mbox->mbox.hwbase); + + otx2_mbox_destroy(&mbox->mbox); + otx2_mbox_destroy(&mbox->mbox_up); +} + +static int otx2_pfaf_mbox_init(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox; + void __iomem *hwbase; + int err; + + mbox->pfvf = pf; + pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox", + WQ_UNBOUND | WQ_HIGHPRI | + WQ_MEM_RECLAIM, 1); + if (!pf->mbox_wq) + return -ENOMEM; + + /* Mailbox is a reserved memory (in RAM) region shared between + * admin function (i.e AF) and this PF, shouldn't be mapped as + * device memory to allow unaligned accesses. 
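+ * ioremap_wc() below creates a write-combining, normal-memory + * mapping, which tolerates the unaligned accesses that a device + * mapping would fault on.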
+ */ + hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), + pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM)); + if (!hwbase) { + dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); + err = -ENOMEM; + goto exit; + } + + err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, + MBOX_DIR_PFAF, 1); + if (err) + goto exit; + + err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, + MBOX_DIR_PFAF_UP, 1); + if (err) + goto exit; + + err = otx2_mbox_bbuf_init(mbox, pf->pdev); + if (err) + goto exit; + + INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler); + INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler); + otx2_mbox_lock_init(&pf->mbox); + + return 0; +exit: + destroy_workqueue(pf->mbox_wq); + return err; +} + +static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) +{ + struct msg_req *msg; + int err; + + otx2_mbox_lock(&pf->mbox); + if (enable) + msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox); + else + msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox); + + if (!msg) { + otx2_mbox_unlock(&pf->mbox); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + otx2_mbox_unlock(&pf->mbox); + return err; +} + +static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) +{ + struct msg_req *msg; + int err; + + otx2_mbox_lock(&pf->mbox); + if (enable) + msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); + else + msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox); + + if (!msg) { + otx2_mbox_unlock(&pf->mbox); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + otx2_mbox_unlock(&pf->mbox); + return err; +} + +int otx2_set_real_num_queues(struct net_device *netdev, + int tx_queues, int rx_queues) +{ + int err; + + err = netif_set_real_num_tx_queues(netdev, tx_queues); + if (err) { + netdev_err(netdev, + "Failed to set no of Tx queues: %d\n", tx_queues); + return err; + } + + err = netif_set_real_num_rx_queues(netdev, rx_queues); + if (err) + netdev_err(netdev, + "Failed to set no of Rx queues: %d\n", rx_queues); + return err; +} +EXPORT_SYMBOL(otx2_set_real_num_queues); + +static irqreturn_t otx2_q_intr_handler(int irq, void *data) +{ + struct otx2_nic *pf = data; + u64 val, *ptr; + u64 qidx = 0; + + /* CQ */ + for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { + ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT); + val = otx2_atomic64_add((qidx << 44), ptr); + + otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) | + (val & NIX_CQERRINT_BITS)); + if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42)))) + continue; + + if (val & BIT_ULL(42)) { + netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", + qidx, otx2_read64(pf, NIX_LF_ERR_INT)); + } else { + if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR)) + netdev_err(pf->netdev, "CQ%lld: Doorbell error", + qidx); + if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT)) + netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM", + qidx); + } + + schedule_work(&pf->reset_task); + } + + /* SQ */ + for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); + val = otx2_atomic64_add((qidx << 44), ptr); + otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | + (val & NIX_SQINT_BITS)); + + if (!(val & (NIX_SQINT_BITS | BIT_ULL(42)))) + continue; + + if (val & BIT_ULL(42)) { + netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", + qidx, otx2_read64(pf, NIX_LF_ERR_INT)); + } else { + if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) { + netdev_err(pf->netdev, "SQ%lld: LMT 
store error NIX_LF_SQ_OP_ERR_DBG:0x%llx", + qidx, + otx2_read64(pf, + NIX_LF_SQ_OP_ERR_DBG)); + otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, + BIT_ULL(44)); + } + if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) { + netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DBG:0x%llx\n", + qidx, + otx2_read64(pf, NIX_LF_MNQ_ERR_DBG)); + otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, + BIT_ULL(44)); + } + if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) { + netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx", + qidx, + otx2_read64(pf, + NIX_LF_SEND_ERR_DBG)); + otx2_write64(pf, NIX_LF_SEND_ERR_DBG, + BIT_ULL(44)); + } + if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) + netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", + qidx); + } + + schedule_work(&pf->reset_task); + } + + return IRQ_HANDLED; +} + +static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) +{ + struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq; + struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev; + int qidx = cq_poll->cint_idx; + + /* Disable interrupts. + * + * Completion interrupts behave in a level-triggered fashion, and + * hence have to be cleared only after being serviced. + */ + otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); + + /* Schedule NAPI */ + napi_schedule_irqoff(&cq_poll->napi); + + return IRQ_HANDLED; +} + +static void otx2_disable_napi(struct otx2_nic *pf) +{ + struct otx2_qset *qset = &pf->qset; + struct otx2_cq_poll *cq_poll; + int qidx; + + for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { + cq_poll = &qset->napi[qidx]; + napi_disable(&cq_poll->napi); + netif_napi_del(&cq_poll->napi); + } +} + +static void otx2_free_cq_res(struct otx2_nic *pf) +{ + struct otx2_qset *qset = &pf->qset; + struct mbox *mbox = &pf->mbox; + struct otx2_cq_queue *cq; + int qidx; + + /* Disable CQs */ + otx2_ctx_disable(mbox, NIX_AQ_CTYPE_CQ, false); + for (qidx = 0; qidx < qset->cq_cnt; qidx++) { + cq = &qset->cq[qidx]; + qmem_free(pf->dev, cq->cqe); + } +} + +static void otx2_free_sq_res(struct otx2_nic *pf) +{ + struct otx2_qset *qset = &pf->qset; + struct mbox *mbox = &pf->mbox; + struct otx2_snd_queue *sq; + int qidx; + + /* Disable SQs */ + otx2_ctx_disable(mbox, NIX_AQ_CTYPE_SQ, false); + /* Free SQB pointers */ + otx2_sq_free_sqbs(pf); + for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + sq = &qset->sq[qidx]; + qmem_free(pf->dev, sq->sqe); + qmem_free(pf->dev, sq->tso_hdrs); + kfree(sq->sg); + kfree(sq->sqb_ptrs); + qmem_free(pf->dev, sq->timestamps); + } +} + +static int otx2_init_hw_resources(struct otx2_nic *pf) +{ + struct nix_lf_free_req *free_req; + struct mbox *mbox = &pf->mbox; + struct otx2_hw *hw = &pf->hw; + struct msg_req *req; + int err = 0, lvl; + + /* Set required NPA LF's pool counts + * Auras and Pools are used in a 1:1 mapping, + * so, aura count = pool count.
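+ * Hence one pool (and aura) per RQ and one per SQ: + * pool_cnt = rqpool_cnt + sqpool_cnt.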
+ */ + hw->rqpool_cnt = hw->rx_queues; + hw->sqpool_cnt = hw->tx_queues; + hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; + + otx2_mbox_lock(mbox); + /* NPA init */ + err = otx2_config_npa(pf); + if (err) + goto exit; + + /* NIX init */ + err = otx2_config_nix(pf); + if (err) + goto err_free_npa_lf; + + /* Enable backpressure */ + otx2_nix_config_bp(pf, true); + + /* Init Auras and pools used by NIX RQ, for free buffer ptrs */ + err = otx2_rq_aura_pool_init(pf); + if (err) { + otx2_mbox_unlock(mbox); + goto err_free_nix_lf; + } + /* Init Auras and pools used by NIX SQ, for queueing SQEs */ + err = otx2_sq_aura_pool_init(pf); + if (err) { + otx2_mbox_unlock(mbox); + goto err_free_rq_ptrs; + } + + err = otx2_txsch_alloc(pf); + if (err) { + otx2_mbox_unlock(mbox); + goto err_free_sq_ptrs; + } + + err = otx2_config_nix_queues(pf); + if (err) { + otx2_mbox_unlock(mbox); + goto err_free_txsch; + } + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + err = otx2_txschq_config(pf, lvl); + if (err) { + otx2_mbox_unlock(mbox); + goto err_free_nix_queues; + } + } + otx2_mbox_unlock(mbox); + return err; + +err_free_nix_queues: + otx2_free_sq_res(pf); + otx2_free_cq_res(pf); + otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); +err_free_txsch: + if (otx2_txschq_stop(pf)) + dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__); +err_free_sq_ptrs: + otx2_sq_free_sqbs(pf); +err_free_rq_ptrs: + otx2_free_aura_ptr(pf, AURA_NIX_RQ); + otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); + otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); + otx2_aura_pool_free(pf); +err_free_nix_lf: + otx2_mbox_lock(mbox); + free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); + if (free_req) { + free_req->flags = NIX_LF_DISABLE_FLOWS; + if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "%s failed to free nixlf\n", __func__); + } +err_free_npa_lf: + /* Reset NPA LF */ + req = otx2_mbox_alloc_msg_npa_lf_free(mbox); + if (req) { + if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "%s failed to free npalf\n", __func__); + } +exit: + otx2_mbox_unlock(mbox); + return err; +} + +static void otx2_free_hw_resources(struct otx2_nic *pf) +{ + struct otx2_qset *qset = &pf->qset; + struct nix_lf_free_req *free_req; + struct mbox *mbox = &pf->mbox; + struct otx2_cq_queue *cq; + struct msg_req *req; + int qidx, err; + + /* Stop transmission */ + err = otx2_txschq_stop(pf); + if (err) + dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n"); + + otx2_mbox_lock(mbox); + /* Disable backpressure */ + if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) + otx2_nix_config_bp(pf, false); + otx2_mbox_unlock(mbox); + + otx2_free_sq_res(pf); + + /* Disable RQs */ + otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); + + /* Dequeue all CQEs */ + for (qidx = 0; qidx < qset->cq_cnt; qidx++) { + cq = &qset->cq[qidx]; + if (cq->cq_type == CQ_RX) + otx2_cleanup_rx_cqes(pf, cq); + else + otx2_cleanup_tx_cqes(pf, cq); + } + + /* Free RQ buffer pointers */ + otx2_free_aura_ptr(pf, AURA_NIX_RQ); + + otx2_free_cq_res(pf); + + otx2_mbox_lock(mbox); + /* Reset NIX LF */ + free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); + if (free_req) { + free_req->flags = NIX_LF_DISABLE_FLOWS; + if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "%s failed to free nixlf\n", __func__); + } + otx2_mbox_unlock(mbox); + + /* Disable NPA Pool and Aura hw context */ + otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); + otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); + otx2_aura_pool_free(pf); + + otx2_mbox_lock(mbox); + /* Reset NPA LF */ + req = otx2_mbox_alloc_msg_npa_lf_free(mbox); + if (req) {
+ if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "%s failed to free npalf\n", __func__); + } + otx2_mbox_unlock(mbox); +} + +static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct otx2_snd_queue *sq; + int qidx = skb_get_queue_mapping(skb); + struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx); + + /* Check for minimum and maximum packet length */ + if (skb->len <= ETH_HLEN || + (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + sq = &pf->qset.sq[qidx]; + + if (netif_tx_queue_stopped(txq)) { + dev_kfree_skb(skb); + } else if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { + netif_tx_stop_queue(txq); + + /* Check again, in case SQBs got freed up */ + smp_mb(); + if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) + > sq->sqe_thresh) + netif_tx_wake_queue(txq); + + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +int otx2_open(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_qset *qset = &pf->qset; + int err = 0, qidx, vec; + char *irq_name; + + netif_carrier_off(netdev); + + pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues; + /* RQ and SQs are mapped to different CQs, + * so find out max CQ IRQs (i.e CINTs) needed. + */ + pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues); + qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL); + if (!qset->napi) + return -ENOMEM; + + /* CQ size of RQ */ + qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256); + /* CQ size of SQ */ + qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K); + + err = -ENOMEM; + qset->cq = kcalloc(pf->qset.cq_cnt, + sizeof(struct otx2_cq_queue), GFP_KERNEL); + if (!qset->cq) + goto err_free_mem; + + qset->sq = kcalloc(pf->hw.tx_queues, + sizeof(struct otx2_snd_queue), GFP_KERNEL); + if (!qset->sq) + goto err_free_mem; + + qset->rq = kcalloc(pf->hw.rx_queues, + sizeof(struct otx2_rcv_queue), GFP_KERNEL); + if (!qset->rq) + goto err_free_mem; + + err = otx2_init_hw_resources(pf); + if (err) + goto err_free_mem; + + /* Register NAPI handler */ + for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { + cq_poll = &qset->napi[qidx]; + cq_poll->cint_idx = qidx; + /* RQ0 & SQ0 are mapped to CINT0 and so on. + * 'cq_ids[0]' points to RQ's CQ and + * 'cq_ids[1]' points to SQ's CQ. + */ + cq_poll->cq_ids[CQ_RX] = + (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
+ qidx + pf->hw.rx_queues : CINT_INVALID_CQ; + cq_poll->dev = (void *)pf; + netif_napi_add(netdev, &cq_poll->napi, + otx2_napi_handler, NAPI_POLL_WEIGHT); + napi_enable(&cq_poll->napi); + } + + /* Set default MTU in HW */ + err = otx2_hw_set_mtu(pf, netdev->mtu); + if (err) + goto err_disable_napi; + + /* Initialize RSS */ + err = otx2_rss_init(pf); + if (err) + goto err_disable_napi; + + /* Register Queue IRQ handlers */ + vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START; + irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; + + snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name); + + err = request_irq(pci_irq_vector(pf->pdev, vec), + otx2_q_intr_handler, 0, irq_name, pf); + if (err) { + dev_err(pf->dev, + "RVUPF%d: IRQ registration failed for QERR\n", + rvu_get_pf(pf->pcifunc)); + goto err_disable_napi; + } + + /* Enable QINT IRQ */ + otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0)); + + /* Register CQ IRQ handlers */ + vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { + irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; + + snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name, + qidx); + + err = request_irq(pci_irq_vector(pf->pdev, vec), + otx2_cq_intr_handler, 0, irq_name, + &qset->napi[qidx]); + if (err) { + dev_err(pf->dev, + "RVUPF%d: IRQ registration failed for CQ%d\n", + rvu_get_pf(pf->pcifunc), qidx); + goto err_free_cints; + } + vec++; + + otx2_config_irq_coalescing(pf, qidx); + + /* Enable CQ IRQ */ + otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); + otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); + } + + otx2_set_cints_affinity(pf); + + pf->flags &= ~OTX2_FLAG_INTF_DOWN; + /* 'intf_down' may be checked on any cpu */ + smp_wmb(); + + /* we have already received link status notification */ + if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK)) + otx2_handle_link_event(pf); + + if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) + otx2_enable_rxvlan(pf, true); + + /* When reinitializing enable time stamping if it was enabled before */ + if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) { + pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; + otx2_config_hw_tx_tstamp(pf, true); + } + if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) { + pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; + otx2_config_hw_rx_tstamp(pf, true); + } + + /* Set NPC parsing mode, skip LBKs */ + if (!is_otx2_lbkvf(pf->pdev)) { + err = otx2_set_npc_parse_mode(pf); + if (err) + goto err_free_cints; + } + + err = otx2_rxtx_enable(pf, true); + if (err) + goto err_free_cints; + + return 0; + +err_free_cints: + otx2_free_cints(pf, qidx); + vec = pci_irq_vector(pf->pdev, + pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); + otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); + synchronize_irq(vec); + free_irq(vec, pf); +err_disable_napi: + otx2_disable_napi(pf); + otx2_free_hw_resources(pf); +err_free_mem: + kfree(qset->sq); + kfree(qset->cq); + kfree(qset->napi); + return err; +} +EXPORT_SYMBOL(otx2_open); + +int otx2_stop(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_qset *qset = &pf->qset; + int qidx, vec, wrk; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + pf->flags |= OTX2_FLAG_INTF_DOWN; + /* 'intf_down' may be checked on any cpu */ + smp_wmb(); + + /* First stop packet Rx/Tx */ + otx2_rxtx_enable(pf, false); + + /* Cleanup Queue IRQ */ + vec = pci_irq_vector(pf->pdev, + pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); + otx2_write64(pf, 
NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); + synchronize_irq(vec); + free_irq(vec, pf); + + /* Cleanup CQ NAPI and IRQ */ + vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { + /* Disable interrupt */ + otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); + + synchronize_irq(pci_irq_vector(pf->pdev, vec)); + + cq_poll = &qset->napi[qidx]; + napi_synchronize(&cq_poll->napi); + vec++; + } + + netif_tx_disable(netdev); + + otx2_free_hw_resources(pf); + otx2_free_cints(pf, pf->hw.cint_cnt); + otx2_disable_napi(pf); + + for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) + netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); + + for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) + cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); + devm_kfree(pf->dev, pf->refill_wrk); + + kfree(qset->sq); + kfree(qset->cq); + kfree(qset->rq); + kfree(qset->napi); + /* Do not clear RQ/SQ ringsize settings */ + memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0, + sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt)); + return 0; +} +EXPORT_SYMBOL(otx2_stop); + +static netdev_features_t otx2_fix_features(struct net_device *dev, + netdev_features_t features) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_STAG_RX; + else + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + return features; +} + +static void otx2_set_rx_mode(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + queue_work(pf->otx2_ndo_wq, &pf->otx2_rx_mode_work); +} + +void otx2_do_set_rx_mode(struct work_struct *work) +{ + struct otx2_nic *pf = container_of(work, struct otx2_nic, + otx2_rx_mode_work); + struct net_device *netdev = pf->netdev; + struct nix_rx_mode *req; + + if (!(netdev->flags & IFF_UP)) + return; + + /* Write unicast address to mcam entries or del from mcam */ + if (netdev->priv_flags & IFF_UNICAST_FLT) + __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); + + otx2_mbox_lock(&pf->mbox); + req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); + if (!req) { + otx2_mbox_unlock(&pf->mbox); + return; + } + + req->mode = NIX_RX_MODE_UCAST; + + if (netdev->flags & IFF_PROMISC) + req->mode |= NIX_RX_MODE_PROMISC; + else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) + req->mode |= NIX_RX_MODE_ALLMULTI; + + otx2_sync_mbox_msg(&pf->mbox); + otx2_mbox_unlock(&pf->mbox); +} + +static void otx2_reset_task(struct work_struct *work) +{ + struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task); + + if (!netif_running(pf->netdev)) + return; + + otx2_stop(pf->netdev); + pf->reset_count++; + otx2_open(pf->netdev); + netif_trans_update(pf->netdev); +} + +static int otx2_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct otx2_nic *pf = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + bool ntuple = !!(features & NETIF_F_NTUPLE); + + if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) + return otx2_cgx_config_loopback(pf, + features & NETIF_F_LOOPBACK); + + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev)) + return otx2_enable_rxvlan(pf, + features & NETIF_F_HW_VLAN_CTAG_RX); + + if ((changed & NETIF_F_NTUPLE) && !ntuple) + otx2_destroy_ntuple_flows(pf); + + return 0; +} + +static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable) +{ + struct msg_req *req; + int err; + + if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable) + return 0; + + otx2_mbox_lock(&pfvf->mbox); + if (enable) + req = 
otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + otx2_mbox_unlock(&pfvf->mbox); + return err; + } + + otx2_mbox_unlock(&pfvf->mbox); + if (enable) + pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED; + else + pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; + return 0; +} + +static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) +{ + struct msg_req *req; + int err; + + if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable) + return 0; + + otx2_mbox_lock(&pfvf->mbox); + if (enable) + req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + otx2_mbox_unlock(&pfvf->mbox); + return err; + } + + otx2_mbox_unlock(&pfvf->mbox); + if (enable) + pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED; + else + pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; + return 0; +} + +static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + struct hwtstamp_config config; + + if (!pfvf->ptp) + return -ENODEV; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + otx2_config_hw_tx_tstamp(pfvf, false); + break; + case HWTSTAMP_TX_ON: + otx2_config_hw_tx_tstamp(pfvf, true); + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + otx2_config_hw_rx_tstamp(pfvf, false); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + otx2_config_hw_rx_tstamp(pfvf, true); + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return otx2_config_hwtstamp(netdev, req); + default: + return -EOPNOTSUPP; + } +} + +static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) +{ + struct npc_install_flow_req *req; + int err; + + otx2_mbox_lock(&pf->mbox); + req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); + if (!req) { + otx2_mbox_unlock(&pf->mbox); + return -ENOMEM; + } + + ether_addr_copy(req->packet.dmac, mac); + u64_to_ether_addr(0xffffffffffffull, req->mask.dmac); + req->features = BIT_ULL(NPC_DMAC); + req->channel = pf->hw.rx_chan_base; + req->intf = NIX_INTF_RX; + req->default_rule = 1; + req->append = 1; + req->vf = vf + 1; + req->op = NIX_RX_ACTION_DEFAULT; + + err = otx2_sync_mbox_msg(&pf->mbox); + otx2_mbox_unlock(&pf->mbox); + + return err; +} + +static int otx2_set_vf_mac(struct net_device *netdev, int vf, 
u8 *mac) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct pci_dev *pdev = pf->pdev; + struct otx2_vf_config *config; + + if (!netif_running(netdev)) + return -EAGAIN; + + if (vf >= pci_num_vf(pdev)) + return -EINVAL; + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + config = &pf->vf_configs[vf]; + ether_addr_copy(config->mac, mac); + + return otx2_do_set_vf_mac(pf, vf, mac); +} + +static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos) +{ + struct npc_install_flow_req *req; + int err; + + otx2_mbox_lock(&pf->mbox); + req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); + if (!req) { + otx2_mbox_unlock(&pf->mbox); + return -ENOMEM; + } + + req->packet.vlan_tci = htons(vlan); + req->mask.vlan_tci = htons(VLAN_VID_MASK); + req->features = BIT_ULL(NPC_OUTER_VID); + req->channel = pf->hw.rx_chan_base; + req->intf = NIX_INTF_RX; + req->default_rule = 1; + req->append = 1; + req->vf = vf + 1; + req->op = NIX_RX_ACTION_DEFAULT; + + err = otx2_sync_mbox_msg(&pf->mbox); + otx2_mbox_unlock(&pf->mbox); + + return err; +} + +static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 proto) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct pci_dev *pdev = pf->pdev; + struct otx2_vf_config *config; + + if (!netif_running(netdev)) + return -EAGAIN; + + if (vf >= pci_num_vf(pdev)) + return -EINVAL; + + /* qos is currently unsupported */ + if (vlan >= VLAN_N_VID || qos) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + config = &pf->vf_configs[vf]; + config->vlan = vlan; + + return otx2_do_set_vf_vlan(pf, vf, vlan, qos); +} + +static int otx2_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct pci_dev *pdev = pf->pdev; + struct otx2_vf_config *config; + + if (vf >= pci_num_vf(pdev)) + return -EINVAL; + + config = &pf->vf_configs[vf]; + ivi->vf = vf; + ether_addr_copy(ivi->mac, config->mac); + ivi->vlan = config->vlan; + + return 0; +} + +static netdev_features_t +otx2_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + return features; +} + +static const struct net_device_ops otx2_netdev_ops = { + .ndo_open = otx2_open, + .ndo_stop = otx2_stop, + .ndo_start_xmit = otx2_xmit, + .ndo_fix_features = otx2_fix_features, + .ndo_set_mac_address = otx2_set_mac_address, + .ndo_change_mtu = otx2_change_mtu, + .ndo_set_rx_mode = otx2_set_rx_mode, + .ndo_set_features = otx2_set_features, + .ndo_tx_timeout = otx2_tx_timeout, + .ndo_get_stats64 = otx2_get_stats64, + .ndo_do_ioctl = otx2_ioctl, + .ndo_set_vf_mac = otx2_set_vf_mac, + .ndo_set_vf_vlan = otx2_set_vf_vlan, + .ndo_get_vf_config = otx2_get_vf_config, + .ndo_features_check = otx2_features_check, +}; + +static int otx2_check_pf_usable(struct otx2_nic *nic) +{ + u64 rev; + + rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM)); + rev = (rev >> 12) & 0xFF; + /* Check if AF has setup revision for RVUM block, + * otherwise this driver probe should be deferred + * until AF driver comes up. 
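+ * The AF writes a non-zero revision into this discovery register + * once it is up; a zero read therefore means the AF driver has not + * initialized the RVUM block yet.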
+ */ + if (!rev) { + dev_warn(nic->dev, + "AF is not initialized, deferring probe\n"); + return -EPROBE_DEFER; + } + return 0; +} + +static int otx2_realloc_msix_vectors(struct otx2_nic *pf) +{ + struct otx2_hw *hw = &pf->hw; + int num_vec, err; + + num_vec = hw->nix_msixoff; + num_vec += NIX_LF_CINT_VEC_START + hw->max_queues; + + otx2_disable_mbox_intr(pf); + pci_free_irq_vectors(hw->pdev); + err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); + if (err < 0) { + dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n", + __func__, num_vec); + return err; + } + + err = otx2_register_mbox_intr(pf, false); + if (err) + return err; + return 0; +} + +static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct otx2_nic *pf; + struct otx2_hw *hw; + int err, qcount; + int num_vec; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + return err; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to set DMA mask\n"); + goto err_release_regions; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to set consistent DMA mask\n"); + goto err_release_regions; + } + + pci_set_master(pdev); + + /* Set number of queues */ + qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT); + + netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount); + if (!netdev) { + err = -ENOMEM; + goto err_release_regions; + } + + pci_set_drvdata(pdev, netdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + pf = netdev_priv(netdev); + pf->netdev = netdev; + pf->pdev = pdev; + pf->dev = dev; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); + pf->flags |= OTX2_FLAG_INTF_DOWN; + + hw = &pf->hw; + hw->pdev = pdev; + hw->rx_queues = qcount; + hw->tx_queues = qcount; + hw->max_queues = qcount; + + num_vec = pci_msix_vec_count(pdev); + hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, + GFP_KERNEL); + if (!hw->irq_name) { + err = -ENOMEM; + goto err_free_netdev; + } + + hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, + sizeof(cpumask_var_t), GFP_KERNEL); + if (!hw->affinity_mask) { + err = -ENOMEM; + goto err_free_netdev; + } + + /* Map CSRs */ + pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!pf->reg_base) { + dev_err(dev, "Unable to map physical function CSRs, aborting\n"); + err = -ENOMEM; + goto err_free_netdev; + } + + err = otx2_check_pf_usable(pf); + if (err) + goto err_free_netdev; + + err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, + RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + if (err < 0) { + dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", + __func__, RVU_PF_INT_VEC_CNT); + goto err_free_netdev; + } + + /* Init PF <=> AF mailbox stuff */ + err = otx2_pfaf_mbox_init(pf); + if (err) + goto err_free_irq_vectors; + + /* Register mailbox interrupt */ + err = otx2_register_mbox_intr(pf, true); + if (err) + goto err_mbox_destroy; + + /* Request AF to attach NPA and NIX LFs to this PF. + * NIX and NPA LFs are needed for this PF to function as a NIC.
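+ * NPA supplies the buffer pools/auras and NIX supplies the Rx/Tx + * queues that back this netdev.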
+	 */
+	err = otx2_attach_npa_nix(pf);
+	if (err)
+		goto err_disable_mbox_intr;
+
+	err = otx2_realloc_msix_vectors(pf);
+	if (err)
+		goto err_detach_rsrc;
+
+	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
+	if (err)
+		goto err_detach_rsrc;
+
+	otx2_setup_dev_hw_settings(pf);
+
+	/* Don't check for error. Proceed without ptp */
+	otx2_ptp_init(pf);
+
+	/* Assign default mac address */
+	otx2_get_mac_from_af(netdev);
+
+	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
+	 * HW allocates buffer pointer from stack and uses it for DMA'ing
+	 * ingress packet. In some scenarios HW can free back allocated buffer
+	 * pointers to pool. This makes it impossible for SW to maintain a
+	 * parallel list where physical addresses of buffer pointers (IOVAs)
+	 * given to HW can be saved for later reference.
+	 *
+	 * So the only way to convert Rx packet's buffer address is to use
+	 * IOMMU's iova_to_phys() handler which translates the address by
+	 * walking through the translation tables.
+	 */
+	pf->iommu_domain = iommu_get_domain_for_dev(dev);
+	if (pf->iommu_domain)
+		pf->iommu_domain_type =
+			((struct iommu_domain *)pf->iommu_domain)->type;
+
+	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+	netdev->features |= netdev->hw_features;
+	/* Support TSO on tag interface */
+	netdev->vlan_features |= netdev->features;
+
+	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_HW_VLAN_STAG_TX |
+			       NETIF_F_HW_VLAN_CTAG_RX |
+			       NETIF_F_HW_VLAN_STAG_RX;
+
+	netdev->features |= netdev->hw_features | NETIF_F_LLTX;
+
+	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_NTUPLE |
+			       NETIF_F_RXALL;
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
+	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+	netdev->netdev_ops = &otx2_netdev_ops;
+
+	/* MTU range: 64 - 9190 */
+	netdev->min_mtu = OTX2_MIN_MTU;
+	netdev->max_mtu = OTX2_MAX_MTU;
+
+	INIT_WORK(&pf->reset_task, otx2_reset_task);
+
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(dev, "Failed to register netdevice\n");
+		goto err_ptp_destroy;
+	}
+
+	err = otx2_mcam_flow_init(pf);
+	if (err)
+		goto err_unreg_netdev;
+
+	otx2_set_ethtool_ops(netdev);
+
+	/* Enable link notifications */
+	otx2_cgx_config_linkevents(pf, true);
+
+	return 0;
+
+err_unreg_netdev:
+	unregister_netdev(netdev);
+err_ptp_destroy:
+	otx2_ptp_destroy(pf);
+err_detach_rsrc:
+	otx2_detach_resources(&pf->mbox);
+err_disable_mbox_intr:
+	otx2_disable_mbox_intr(pf);
+err_mbox_destroy:
+	otx2_pfaf_mbox_destroy(pf);
+err_free_irq_vectors:
+	pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+err_release_regions:
+	pci_release_regions(pdev);
+	return err;
+}
+
+static void otx2_vf_link_event_task(struct work_struct *work)
+{
+	struct otx2_vf_config *config;
+	struct cgx_link_info_msg *req;
+	struct mbox_msghdr *msghdr;
+	struct otx2_nic *pf;
+	int vf_idx;
+
+	config = container_of(work, struct otx2_vf_config,
+			      link_event_work.work);
+	vf_idx = config - config->pf->vf_configs;
+	pf = config->pf;
+
+	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+					 sizeof(*req), sizeof(struct msg_rsp));
+	if (!msghdr) {
+		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+		return;
+	}
+
+	req = (struct cgx_link_info_msg *)msghdr;
+	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+	req->hdr.sig = OTX2_MBOX_REQ_SIG;
+	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+
+	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
+
+static void otx2_vf_ptp_info_task(struct work_struct *work)
+{
+	struct cgx_ptp_rx_info_msg *req;
+	struct otx2_vf_config *config;
+	struct mbox_msghdr *msghdr;
+	struct otx2_nic *pf;
+	int vf_idx;
+
+	config = container_of(work, struct otx2_vf_config,
+			      ptp_info_work.work);
+	vf_idx = config - config->pf->vf_configs;
+	pf = config->pf;
+
+	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+					 sizeof(*req), sizeof(struct msg_rsp));
+	if (!msghdr) {
+		dev_err(pf->dev, "Failed to create VF%d ptp info event\n",
+			vf_idx);
+		return;
+	}
+
+	req = (struct cgx_ptp_rx_info_msg *)msghdr;
+	req->hdr.id = MBOX_MSG_CGX_PTP_RX_INFO;
+	req->hdr.sig = OTX2_MBOX_REQ_SIG;
+	req->ptp_en = pf->ptp->ptp_en;
+
+	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
+
+static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct otx2_nic *pf = netdev_priv(netdev);
+	int ret, i;
+
+	if (numvfs > pf->total_vfs)
+		numvfs = pf->total_vfs;
+
+	/* Init PF <=> VF mailbox stuff */
+	ret = otx2_pfvf_mbox_init(pf, numvfs);
+	if (ret)
+		return ret;
+
+	ret = otx2_register_pfvf_mbox_intr(pf);
+	if (ret)
+		goto free_mbox;
+
+	pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
+				 GFP_KERNEL);
+	if (!pf->vf_configs) {
+		ret = -ENOMEM;
+		goto free_intr;
+	}
+
+	for (i = 0; i < numvfs; i++) {
+		pf->vf_configs[i].pf = pf;
+		pf->vf_configs[i].intf_down = true;
+		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+				  otx2_vf_link_event_task);
+		INIT_DELAYED_WORK(&pf->vf_configs[i].ptp_info_work,
+				  otx2_vf_ptp_info_task);
+	}
+
+	ret = otx2_pf_flr_init(pf, numvfs);
+	if (ret)
+		goto free_configs;
+
+	ret = otx2_register_flr_me_intr(pf);
+	if (ret)
+		goto free_flr;
+
+	ret = pci_enable_sriov(pdev, numvfs);
+	if (ret)
+		goto free_flr_intr;
+
+	return numvfs;
+free_flr_intr:
+	otx2_disable_flr_me_intr(pf);
+free_flr:
+	otx2_flr_wq_destroy(pf);
+free_configs:
+	kfree(pf->vf_configs);
+free_intr:
+	otx2_disable_pfvf_mbox_intr(pf);
+free_mbox:
+	otx2_pfvf_mbox_destroy(pf);
+	return ret;
+}
+
+static int otx2_sriov_disable(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct otx2_nic *pf = netdev_priv(netdev);
+	int numvfs = pci_num_vf(pdev);
+	int i;
+
+	if (!numvfs)
+		return 0;
+
+	pci_disable_sriov(pdev);
+
+	/* pci_num_vf() reads back as zero once SR-IOV is disabled, so the
+	 * VF count must be sampled before the pci_disable_sriov() above.
+	 */
+	for (i = 0; i < numvfs; i++) {
+		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+		cancel_delayed_work_sync(&pf->vf_configs[i].ptp_info_work);
+	}
+	kfree(pf->vf_configs);
+
+	otx2_disable_flr_me_intr(pf);
+	otx2_flr_wq_destroy(pf);
+	otx2_disable_pfvf_mbox_intr(pf);
+	otx2_pfvf_mbox_destroy(pf);
+
+	return 0;
+}
+
+static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	if (numvfs == 0)
+		return otx2_sriov_disable(pdev);
+	else
+		return otx2_sriov_enable(pdev, numvfs);
+}
+
+static void otx2_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct otx2_nic *pf;
+
+	if (!netdev)
+		return;
+
+	pf = netdev_priv(netdev);
+
+	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
+		otx2_config_hw_tx_tstamp(pf, false);
+	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
+		otx2_config_hw_rx_tstamp(pf, false);
+
+	/* Disable link notifications */
+	otx2_cgx_config_linkevents(pf, false);
+
+	unregister_netdev(netdev);
+
+	otx2_sriov_disable(pf->pdev);
+	otx2_ptp_destroy(pf);
+	otx2_mcam_flow_del(pf);
+
+	otx2_detach_resources(&pf->mbox);
+	otx2_disable_mbox_intr(pf);
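+	/* Destroying the mailbox frees its queues, so it must happen only
+	 * after the mailbox interrupt has been disabled above.
+	 */
+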
otx2_pfaf_mbox_destroy(pf); + pci_free_irq_vectors(pf->pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + + pci_release_regions(pdev); +} + +static struct pci_driver otx2_pf_driver = { + .name = DRV_NAME, + .id_table = otx2_pf_id_table, + .probe = otx2_probe, + .shutdown = otx2_remove, + .remove = otx2_remove, + .sriov_configure = otx2_sriov_configure +}; + +static int __init otx2_rvupf_init_module(void) +{ + pr_info("%s: %s\n", DRV_NAME, DRV_STRING); + + return pci_register_driver(&otx2_pf_driver); +} + +static void __exit otx2_rvupf_cleanup_module(void) +{ + pci_unregister_driver(&otx2_pf_driver); +} + +module_init(otx2_rvupf_init_module); +module_exit(otx2_rvupf_cleanup_module); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c new file mode 100644 index 000000000000..01a6961afc93 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 PTP support for ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "otx2_common.h" +#include "otx2_ptp.h" + +static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct ptp_req *req; + int err; + + if (!ptp->nic) + return -ENODEV; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return -ENOMEM; + + req->op = PTP_OP_ADJFINE; + req->scaled_ppm = scaled_ppm; + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) + return err; + + return 0; +} + +static u64 ptp_cc_read(const struct cyclecounter *cc) +{ + struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); + struct ptp_req *req; + struct ptp_rsp *rsp; + int err; + + if (!ptp->nic) + return 0; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return 0; + + req->op = PTP_OP_GET_CLOCK; + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) + return 0; + + rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, + &req->hdr); + if (IS_ERR(rsp)) + return 0; + + return rsp->clk; +} + +static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + + otx2_mbox_lock(&ptp->nic->mbox); + timecounter_adjtime(&ptp->time_counter, delta); + otx2_mbox_unlock(&ptp->nic->mbox); + + return 0; +} + +static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + u64 nsec; + + otx2_mbox_lock(&ptp->nic->mbox); + nsec = timecounter_read(&ptp->time_counter); + otx2_mbox_unlock(&ptp->nic->mbox); + + *ts = ns_to_timespec64(nsec); + + return 0; +} + +static int otx2_ptp_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + u64 nsec; + + nsec = timespec64_to_ns(ts); + + otx2_mbox_lock(&ptp->nic->mbox); + timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec); + otx2_mbox_unlock(&ptp->nic->mbox); + + return 0; +} + +static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +int 
otx2_ptp_init(struct otx2_nic *pfvf) +{ + struct otx2_ptp *ptp_ptr; + struct cyclecounter *cc; + struct ptp_req *req; + int err; + + otx2_mbox_lock(&pfvf->mbox); + /* check if PTP block is available */ + req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox); + if (!req) { + otx2_mbox_unlock(&pfvf->mbox); + return -ENOMEM; + } + + req->op = PTP_OP_GET_CLOCK; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + otx2_mbox_unlock(&pfvf->mbox); + return err; + } + otx2_mbox_unlock(&pfvf->mbox); + + ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL); + if (!ptp_ptr) { + err = -ENOMEM; + goto error; + } + + ptp_ptr->nic = pfvf; + + cc = &ptp_ptr->cycle_counter; + cc->read = ptp_cc_read; + cc->mask = CYCLECOUNTER_MASK(64); + cc->mult = 1; + cc->shift = 0; + + timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, + ktime_to_ns(ktime_get_real())); + + ptp_ptr->ptp_info = (struct ptp_clock_info) { + .owner = THIS_MODULE, + .name = "OcteonTX2 PTP", + .max_adj = 1000000000ull, + .n_ext_ts = 0, + .n_pins = 0, + .pps = 0, + .adjfine = otx2_ptp_adjfine, + .adjtime = otx2_ptp_adjtime, + .gettime64 = otx2_ptp_gettime, + .settime64 = otx2_ptp_settime, + .enable = otx2_ptp_enable, + }; + + ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev); + if (IS_ERR(ptp_ptr->ptp_clock)) { + err = PTR_ERR(ptp_ptr->ptp_clock); + kfree(ptp_ptr); + goto error; + } + + pfvf->ptp = ptp_ptr; + +error: + return err; +} + +void otx2_ptp_destroy(struct otx2_nic *pfvf) +{ + struct otx2_ptp *ptp = pfvf->ptp; + + if (!ptp) + return; + + ptp_clock_unregister(ptp->ptp_clock); + kfree(ptp); + pfvf->ptp = NULL; +} + +int otx2_ptp_clock_index(struct otx2_nic *pfvf) +{ + if (!pfvf->ptp) + return -ENODEV; + + return ptp_clock_index(pfvf->ptp->ptp_clock); +} + +int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns) +{ + if (!pfvf->ptp) + return -ENODEV; + + *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h new file mode 100644 index 000000000000..9ddb82cec195 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 PTP support for ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef OTX2_PTP_H +#define OTX2_PTP_H + +int otx2_ptp_init(struct otx2_nic *pfvf); +void otx2_ptp_destroy(struct otx2_nic *pfvf); + +int otx2_ptp_clock_index(struct otx2_nic *pfvf); +int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns); + +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h new file mode 100644 index 000000000000..84271cddd645 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef OTX2_REG_H +#define OTX2_REG_H + +#include <rvu_struct.h> + +/* RVU PF registers */ +#define RVU_PF_VFX_PFVF_MBOX0 (0x00000) +#define RVU_PF_VFX_PFVF_MBOX1 (0x00008) +#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3) +#define RVU_PF_VF_BAR4_ADDR (0x10) +#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3) +#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3) +#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3) +#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3) +#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3) +#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3) +#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3) +#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3) +#define RVU_PF_PFAF_MBOX0 (0xC00) +#define RVU_PF_PFAF_MBOX1 (0xC08) +#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3) +#define RVU_PF_INT (0xc20) +#define RVU_PF_INT_W1S (0xc28) +#define RVU_PF_INT_ENA_W1S (0xc30) +#define RVU_PF_INT_ENA_W1C (0xc38) +#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) +#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) +#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) + +/* RVU VF registers */ +#define RVU_VF_VFPF_MBOX0 (0x00000) +#define RVU_VF_VFPF_MBOX1 (0x00008) +#define RVU_VF_VFPF_MBOXX(a) (0x00 | (a) << 3) +#define RVU_VF_INT (0x20) +#define RVU_VF_INT_W1S (0x28) +#define RVU_VF_INT_ENA_W1S (0x30) +#define RVU_VF_INT_ENA_W1C (0x38) +#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3) +#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) +#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) +#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3) + +#define RVU_FUNC_BLKADDR_SHIFT 20 +#define RVU_FUNC_BLKADDR_MASK 0x1FULL + +/* NPA LF registers */ +#define NPA_LFBASE (BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT) +#define NPA_LF_AURA_OP_ALLOCX(a) (NPA_LFBASE | 0x10 | (a) << 3) +#define NPA_LF_AURA_OP_FREE0 (NPA_LFBASE | 0x20) +#define NPA_LF_AURA_OP_FREE1 (NPA_LFBASE | 0x28) +#define NPA_LF_AURA_OP_CNT (NPA_LFBASE | 0x30) +#define NPA_LF_AURA_OP_LIMIT (NPA_LFBASE | 0x50) +#define NPA_LF_AURA_OP_INT (NPA_LFBASE | 0x60) +#define NPA_LF_AURA_OP_THRESH (NPA_LFBASE | 0x70) +#define NPA_LF_POOL_OP_PC (NPA_LFBASE | 0x100) +#define NPA_LF_POOL_OP_AVAILABLE (NPA_LFBASE | 0x110) +#define NPA_LF_POOL_OP_PTR_START0 (NPA_LFBASE | 0x120) +#define NPA_LF_POOL_OP_PTR_START1 (NPA_LFBASE | 0x128) +#define NPA_LF_POOL_OP_PTR_END0 (NPA_LFBASE | 0x130) +#define NPA_LF_POOL_OP_PTR_END1 (NPA_LFBASE | 0x138) +#define NPA_LF_POOL_OP_INT (NPA_LFBASE | 0x160) +#define NPA_LF_POOL_OP_THRESH (NPA_LFBASE | 0x170) +#define NPA_LF_ERR_INT (NPA_LFBASE | 0x200) +#define NPA_LF_ERR_INT_W1S (NPA_LFBASE | 0x208) +#define NPA_LF_ERR_INT_ENA_W1C (NPA_LFBASE | 0x210) +#define NPA_LF_ERR_INT_ENA_W1S (NPA_LFBASE | 0x218) +#define NPA_LF_RAS (NPA_LFBASE | 0x220) +#define NPA_LF_RAS_W1S (NPA_LFBASE | 0x228) +#define NPA_LF_RAS_ENA_W1C (NPA_LFBASE | 0x230) +#define NPA_LF_RAS_ENA_W1S (NPA_LFBASE | 0x238) +#define NPA_LF_QINTX_CNT(a) (NPA_LFBASE | 0x300 | (a) << 12) +#define NPA_LF_QINTX_INT(a) (NPA_LFBASE | 0x310 | (a) << 12) +#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | 
(a) << 12) +#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12) +#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12) + +/* NIX LF registers */ +#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT) +#define NIX_LF_RX_SECRETX(a) (NIX_LFBASE | 0x0 | (a) << 3) +#define NIX_LF_CFG (NIX_LFBASE | 0x100) +#define NIX_LF_GINT (NIX_LFBASE | 0x200) +#define NIX_LF_GINT_W1S (NIX_LFBASE | 0x208) +#define NIX_LF_GINT_ENA_W1C (NIX_LFBASE | 0x210) +#define NIX_LF_GINT_ENA_W1S (NIX_LFBASE | 0x218) +#define NIX_LF_ERR_INT (NIX_LFBASE | 0x220) +#define NIX_LF_ERR_INT_W1S (NIX_LFBASE | 0x228) +#define NIX_LF_ERR_INT_ENA_W1C (NIX_LFBASE | 0x230) +#define NIX_LF_ERR_INT_ENA_W1S (NIX_LFBASE | 0x238) +#define NIX_LF_RAS (NIX_LFBASE | 0x240) +#define NIX_LF_RAS_W1S (NIX_LFBASE | 0x248) +#define NIX_LF_RAS_ENA_W1C (NIX_LFBASE | 0x250) +#define NIX_LF_RAS_ENA_W1S (NIX_LFBASE | 0x258) +#define NIX_LF_SQ_OP_ERR_DBG (NIX_LFBASE | 0x260) +#define NIX_LF_MNQ_ERR_DBG (NIX_LFBASE | 0x270) +#define NIX_LF_SEND_ERR_DBG (NIX_LFBASE | 0x280) +#define NIX_LF_TX_STATX(a) (NIX_LFBASE | 0x300 | (a) << 3) +#define NIX_LF_RX_STATX(a) (NIX_LFBASE | 0x400 | (a) << 3) +#define NIX_LF_OP_SENDX(a) (NIX_LFBASE | 0x800 | (a) << 3) +#define NIX_LF_RQ_OP_INT (NIX_LFBASE | 0x900) +#define NIX_LF_RQ_OP_OCTS (NIX_LFBASE | 0x910) +#define NIX_LF_RQ_OP_PKTS (NIX_LFBASE | 0x920) +#define NIX_LF_OP_IPSEC_DYNO_CN (NIX_LFBASE | 0x980) +#define NIX_LF_SQ_OP_INT (NIX_LFBASE | 0xa00) +#define NIX_LF_SQ_OP_OCTS (NIX_LFBASE | 0xa10) +#define NIX_LF_SQ_OP_PKTS (NIX_LFBASE | 0xa20) +#define NIX_LF_SQ_OP_STATUS (NIX_LFBASE | 0xa30) +#define NIX_LF_CQ_OP_INT (NIX_LFBASE | 0xb00) +#define NIX_LF_CQ_OP_DOOR (NIX_LFBASE | 0xb30) +#define NIX_LF_CQ_OP_STATUS (NIX_LFBASE | 0xb40) +#define NIX_LF_QINTX_CNT(a) (NIX_LFBASE | 0xC00 | (a) << 12) +#define NIX_LF_QINTX_INT(a) (NIX_LFBASE | 0xC10 | (a) << 12) +#define NIX_LF_QINTX_INT_W1S(a) (NIX_LFBASE | 0xC18 | (a) << 12) +#define NIX_LF_QINTX_ENA_W1S(a) (NIX_LFBASE | 0xC20 | (a) << 12) +#define NIX_LF_QINTX_ENA_W1C(a) (NIX_LFBASE | 0xC30 | (a) << 12) +#define NIX_LF_CINTX_CNT(a) (NIX_LFBASE | 0xD00 | (a) << 12) +#define NIX_LF_CINTX_WAIT(a) (NIX_LFBASE | 0xD10 | (a) << 12) +#define NIX_LF_CINTX_INT(a) (NIX_LFBASE | 0xD20 | (a) << 12) +#define NIX_LF_CINTX_INT_W1S(a) (NIX_LFBASE | 0xD30 | (a) << 12) +#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12) +#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12) + +/* NIX AF transmit scheduler registers */ +#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) +#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16) +#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16) +#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16) +#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16) +#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16) +#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16) +#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) +#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) +#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) +#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) +#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) +#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) + +/* LMT LF registers */ +#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT) +#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12) +#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400) + +#endif /* OTX2_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c new file mode 100644 index 000000000000..4f5f73f621d4 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Virtual Function ethernet driver + * + * Copyright (C) 2019 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> + +#include "otx2_common.h" +#include "otx2_reg.h" +#include "otx2_struct.h" +#include "rvu_fixes.h" + +/* serialize device removal and xmit */ +DEFINE_MUTEX(remove_lock); + +static char pkt_data[64] = { 0x00, 0x0f, 0xb7, 0x11, 0xa6, 0x87, 0x02, 0xe0, + 0x28, 0xa5, 0xf6, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x04, 0x11, + 0xee, 0x53, 0x50, 0x50, 0x50, 0x02, 0x14, 0x14, + 0x14, 0x02, 0x10, 0x00, 0x10, 0x01, 0x00, 0x1e, + 0x00, 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, + 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76 }; + +static struct sk_buff *the_skb; +static struct otx2_nic *the_smqvf; +static u16 drop_entry = 0xFFFF; + +static bool is_otx2_smqvf(struct otx2_nic *vf) +{ + if (vf->pcifunc == RVU_SMQVF_PCIFUNC && + (is_96xx_A0(vf->pdev) || is_95xx_A0(vf->pdev))) + return true; + + return false; +} + +static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size) +{ + u64 status; + + /* Packet data stores should finish before SQE is flushed to HW */ + dma_wmb(); + + do { + memcpy(sq->lmt_addr, sq->sqe_base, size); + status = otx2_lmt_flush(sq->io_addr); + } while (status == 0); + + sq->head++; + sq->head &= (sq->sqe_cnt - 1); +} + +static int otx2_ctx_update(struct otx2_nic *vf, u16 qidx) +{ + struct nix_aq_enq_req *sq_aq, *rq_aq, *cq_aq; + + /* Do not link CQ for SQ and disable RQ, CQ */ + sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox); + if (!sq_aq) + return -ENOMEM; + + sq_aq->sq.cq_ena = 0; + sq_aq->sq_mask.cq_ena = 1; + sq_aq->qidx = qidx; + sq_aq->ctype = NIX_AQ_CTYPE_SQ; + sq_aq->op = NIX_AQ_INSTOP_WRITE; + + rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox); + if (!rq_aq) + return -ENOMEM; + + rq_aq->rq.ena = 0; + rq_aq->rq_mask.ena = 1; + rq_aq->qidx = qidx; + rq_aq->ctype = NIX_AQ_CTYPE_RQ; + rq_aq->op = NIX_AQ_INSTOP_WRITE; + + cq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox); + if (!cq_aq) + return -ENOMEM; + + cq_aq->cq.ena = 0; + cq_aq->cq_mask.ena = 1; + cq_aq->qidx = qidx; + cq_aq->ctype = NIX_AQ_CTYPE_CQ; + cq_aq->op = NIX_AQ_INSTOP_WRITE; + + return otx2_sync_mbox_msg(&vf->mbox); +} + +void otx2smqvf_xmit(void) +{ + struct otx2_snd_queue *sq; + int i, size; + + mutex_lock(&remove_lock); + + if (!the_smqvf) { + mutex_unlock(&remove_lock); + return; + } + + sq = &the_smqvf->qset.sq[0]; + /* Min. 
set of send descriptors required to send packets */
+	size = sizeof(struct nix_sqe_hdr_s) + sizeof(struct nix_sqe_sg_s) +
+	       sizeof(struct nix_sqe_ext_s) + sizeof(u64);
+
+	for (i = 0; i < 256; i++)
+		otx2_sqe_flush(sq, size);
+
+	mutex_unlock(&remove_lock);
+}
+EXPORT_SYMBOL(otx2smqvf_xmit);
+
+static int otx2smqvf_install_flow(struct otx2_nic *vf)
+{
+	struct npc_mcam_alloc_entry_req *alloc_req;
+	struct npc_mcam_free_entry_req *free_req;
+	struct npc_install_flow_req *install_req;
+	struct npc_mcam_alloc_entry_rsp *rsp;
+	struct msg_req *msg;
+	int err, qid;
+	size_t size;
+	void *data;
+
+	size = SKB_DATA_ALIGN(64 + OTX2_ALIGN) + NET_SKB_PAD +
+	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	err = -ENOMEM;
+
+	data = kzalloc(size, GFP_KERNEL);
+	if (!data)
+		return err;
+
+	memcpy(data, &pkt_data, 64);
+
+	the_skb = build_skb(data, 0);
+	if (!the_skb) {
+		kfree(data);
+		return err;
+	}
+	the_skb->len = 64;
+
+	for (qid = 0; qid < vf->hw.tx_queues; qid++) {
+		err = otx2_ctx_update(vf, qid);
+		/* If something went wrong with Q0, treat it as an error */
+		if (err && !qid)
+			goto err_free_mem;
+	}
+
+	otx2_mbox_lock(&vf->mbox);
+
+	alloc_req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&vf->mbox);
+	if (!alloc_req) {
+		err = -ENOMEM;
+		otx2_mbox_unlock(&vf->mbox);
+		goto err_free_mem;
+	}
+	alloc_req->count = 1;
+	alloc_req->contig = true;
+
+	/* Send message to AF */
+	if (otx2_sync_mbox_msg(&vf->mbox)) {
+		err = -EINVAL;
+		otx2_mbox_unlock(&vf->mbox);
+		goto err_free_mem;
+	}
+	otx2_mbox_unlock(&vf->mbox);
+
+	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+	      (&vf->mbox.mbox, 0, &alloc_req->hdr);
+	if (IS_ERR(rsp)) {
+		err = PTR_ERR(rsp);
+		goto err_free_mem;
+	}
+	drop_entry = rsp->entry;
+
+	otx2_mbox_lock(&vf->mbox);
+
+	/* Send messages to drop Tx packets at NPC and stop Rx traffic */
+	install_req = otx2_mbox_alloc_msg_npc_install_flow(&vf->mbox);
+	if (!install_req) {
+		err = -ENOMEM;
+		otx2_mbox_unlock(&vf->mbox);
+		goto err_free_entry;
+	}
+
+	u64_to_ether_addr(0x0ull, install_req->mask.dmac);
+	install_req->entry = drop_entry;
+	install_req->features = BIT_ULL(NPC_DMAC);
+	install_req->intf = NIX_INTF_TX;
+	install_req->op = NIX_TX_ACTIONOP_DROP;
+	install_req->set_cntr = 1;
+
+	msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&vf->mbox);
+	if (!msg) {
+		err = -ENOMEM;
+		otx2_mbox_unlock(&vf->mbox);
+		goto err_free_entry;
+	}
+
+	/* Send message to AF */
+	if (otx2_sync_mbox_msg(&vf->mbox)) {
+		err = -EINVAL;
+		otx2_mbox_unlock(&vf->mbox);
+		goto err_free_entry;
+	}
+	otx2_mbox_unlock(&vf->mbox);
+
+	otx2_sq_append_skb(vf->netdev, &vf->qset.sq[0], the_skb, 0);
+
+	return 0;
+
+err_free_entry:
+	otx2_mbox_lock(&vf->mbox);
+	free_req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&vf->mbox);
+	if (!free_req) {
+		dev_err(vf->dev, "Could not allocate msg for freeing entry\n");
+	} else {
+		free_req->entry = drop_entry;
+		WARN_ON(otx2_sync_mbox_msg(&vf->mbox));
+	}
+	otx2_mbox_unlock(&vf->mbox);
+err_free_mem:
+	kfree_skb(the_skb);
+	the_skb = NULL;
+	drop_entry = 0xFFFF;
+	return err;
+}
+
+int otx2smqvf_probe(struct otx2_nic *vf)
+{
+	int err;
+
+	if (!is_otx2_smqvf(vf))
+		return -EPERM;
+
+	err = otx2_open(vf->netdev);
+	if (err)
+		return -EINVAL;
+
+	/* Disable QINT interrupts because we do not use a CQ for SQ and
+	 * drop TX packets intentionally
+	 */
+	otx2_write64(vf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+
+	err = otx2smqvf_install_flow(vf);
+	if (err) {
+		otx2_stop(vf->netdev);
+		return -EINVAL;
+	}
+
+	the_smqvf = vf;
+
+	return 0;
+}
+
+int otx2smqvf_remove(struct otx2_nic *vf)
+{
+	struct npc_mcam_free_entry_req *free_req;
+	struct npc_delete_flow_req *del_req;
+
+	if (!is_otx2_smqvf(vf))
+		return -EPERM;
+
+	mutex_lock(&remove_lock);
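+	/* Clear the global references while holding remove_lock so that a
+	 * concurrent otx2smqvf_xmit() cannot use a freed skb or send queue.
+	 */
+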
kfree_skb(the_skb); + the_smqvf = NULL; + the_skb = NULL; + mutex_unlock(&remove_lock); + + otx2_mbox_lock(&vf->mbox); + del_req = otx2_mbox_alloc_msg_npc_delete_flow(&vf->mbox); + free_req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&vf->mbox); + if (!del_req || !free_req) { + dev_err(vf->dev, "Could not allocate msg for freeing entry\n"); + } else { + del_req->entry = drop_entry; + free_req->entry = drop_entry; + WARN_ON(otx2_sync_mbox_msg(&vf->mbox)); + } + otx2_mbox_unlock(&vf->mbox); + + otx2_stop(vf->netdev); + drop_entry = 0xFFFF; + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h new file mode 100644 index 000000000000..fe0f0fb45294 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -0,0 +1,425 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef OTX2_STRUCT_H +#define OTX2_STRUCT_H + +/* NIX WQE/CQE size 128 byte or 512 byte */ +enum nix_cqesz_e { + NIX_XQESZ_W64 = 0x0, + NIX_XQESZ_W16 = 0x1, +}; + +enum nix_sqes_e { + NIX_SQESZ_W16 = 0x0, + NIX_SQESZ_W8 = 0x1, +}; + +enum nix_send_ldtype { + NIX_SEND_LDTYPE_LDD = 0x0, + NIX_SEND_LDTYPE_LDT = 0x1, + NIX_SEND_LDTYPE_LDWB = 0x2, +}; + +/* CSUM offload */ +enum nix_sendl3type { + NIX_SENDL3TYPE_NONE = 0x0, + NIX_SENDL3TYPE_IP4 = 0x2, + NIX_SENDL3TYPE_IP4_CKSUM = 0x3, + NIX_SENDL3TYPE_IP6 = 0x4, +}; + +enum nix_sendl4type { + NIX_SENDL4TYPE_NONE, + NIX_SENDL4TYPE_TCP_CKSUM, + NIX_SENDL4TYPE_SCTP_CKSUM, + NIX_SENDL4TYPE_UDP_CKSUM, +}; + +/* NIX wqe/cqe types */ +enum nix_xqe_type { + NIX_XQE_TYPE_INVALID = 0x0, + NIX_XQE_TYPE_RX = 0x1, + NIX_XQE_TYPE_RX_IPSECS = 0x2, + NIX_XQE_TYPE_RX_IPSECH = 0x3, + NIX_XQE_TYPE_RX_IPSECD = 0x4, + NIX_XQE_TYPE_SEND = 0x8, +}; + +/* NIX CQE/SQE subdescriptor types */ +enum nix_subdc { + NIX_SUBDC_NOP = 0x0, + NIX_SUBDC_EXT = 0x1, + NIX_SUBDC_CRC = 0x2, + NIX_SUBDC_IMM = 0x3, + NIX_SUBDC_SG = 0x4, + NIX_SUBDC_MEM = 0x5, + NIX_SUBDC_JUMP = 0x6, + NIX_SUBDC_WORK = 0x7, + NIX_SUBDC_SOD = 0xf, +}; + +/* Algorithm for nix_sqe_mem_s header (value of the `alg` field) */ +enum nix_sendmemalg { + NIX_SENDMEMALG_E_SET = 0x0, + NIX_SENDMEMALG_E_SETTSTMP = 0x1, + NIX_SENDMEMALG_E_SETRSLT = 0x2, + NIX_SENDMEMALG_E_ADD = 0x8, + NIX_SENDMEMALG_E_SUB = 0x9, + NIX_SENDMEMALG_E_ADDLEN = 0xa, + NIX_SENDMEMALG_E_SUBLEN = 0xb, + NIX_SENDMEMALG_E_ADDMBUF = 0xc, + NIX_SENDMEMALG_E_SUBMBUF = 0xd, + NIX_SENDMEMALG_E_ENUM_LAST = 0xe, +}; + +/* NIX CQE header structure */ +struct nix_cqe_hdr_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type : 4; + u64 node : 2; + u64 reserved_52_57 : 6; + u64 q : 20; + u64 flow_tag : 32; +#else + u64 flow_tag : 32; + u64 q : 20; + u64 reserved_52_57 : 6; + u64 node : 2; + u64 cqe_type : 4; +#endif +}; + +/* NIX CQE RX parse structure */ +struct nix_rx_parse_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 lhtype : 4; + u64 lgtype : 4; + u64 lftype : 4; + u64 letype : 4; + u64 ldtype : 4; + u64 lctype : 4; + u64 lbtype : 4; + u64 latype : 4; + u64 errcode : 8; + u64 errlev : 4; + u64 wqwd : 1; + u64 express : 1; + u64 rsvd_17 : 1; + u64 desc_sizem1 : 5; + u64 chan : 12; +#else + u64 chan : 12; + u64 desc_sizem1 : 5; + u64 rsvd_17 : 1; + u64 express : 1; + u64 wqwd : 1; + u64 
errlev : 4; + u64 errcode : 8; + u64 latype : 4; + u64 lbtype : 4; + u64 lctype : 4; + u64 ldtype : 4; + u64 letype : 4; + u64 lftype : 4; + u64 lgtype : 4; + u64 lhtype : 4; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 vtag1_tci : 16; + u64 vtag0_tci : 16; + u64 rsvd_95_94 : 2; + u64 pkind : 6; + u64 vtag1_gone : 1; + u64 vtag1_valid : 1; + u64 vtag0_gone : 1; + u64 vtag0_valid : 1; + u64 l3b : 1; + u64 l3m : 1; + u64 l2b : 1; + u64 l2m : 1; + u64 pkt_lenm1 : 16; +#else + u64 pkt_lenm1 : 16; + u64 l2m : 1; + u64 l2b : 1; + u64 l3m : 1; + u64 l3b : 1; + u64 vtag0_valid : 1; + u64 vtag0_gone : 1; + u64 vtag1_valid : 1; + u64 vtag1_gone : 1; + u64 pkind : 6; + u64 rsvd_95_94 : 2; + u64 vtag0_tci : 16; + u64 vtag1_tci : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ + u64 lhflags : 8; + u64 lgflags : 8; + u64 lfflags : 8; + u64 leflags : 8; + u64 ldflags : 8; + u64 lcflags : 8; + u64 lbflags : 8; + u64 laflags : 8; +#else + u64 laflags : 8; + u64 lbflags : 8; + u64 lcflags : 8; + u64 ldflags : 8; + u64 leflags : 8; + u64 lfflags : 8; + u64 lgflags : 8; + u64 lhflags : 8; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ + u64 match_id : 16; + u64 pb_aura : 20; + u64 wqe_aura : 20; + u64 eoh_ptr : 8; +#else + u64 eoh_ptr : 8; + u64 wqe_aura : 20; + u64 pb_aura : 20; + u64 match_id : 16; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ + u64 lhptr : 8; + u64 lgptr : 8; + u64 lfptr : 8; + u64 leptr : 8; + u64 ldptr : 8; + u64 lcptr : 8; + u64 lbptr : 8; + u64 laptr : 8; +#else + u64 laptr : 8; + u64 lbptr : 8; + u64 lcptr : 8; + u64 ldptr : 8; + u64 leptr : 8; + u64 lfptr : 8; + u64 lgptr : 8; + u64 lhptr : 8; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ + u64 rsvd_383_341 : 43; + u64 flow_key_alg : 5; + u64 vtag1_ptr : 8; + u64 vtag0_ptr : 8; +#else + u64 vtag0_ptr : 8; + u64 vtag1_ptr : 8; + u64 flow_key_alg : 5; + u64 rsvd_383_341 : 43; +#endif + u64 rsvd_447_384; /* W6 */ +}; + +/* NIX CQE RX scatter/gather subdescriptor structure */ +struct nix_rx_sg_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 subdc : 4; + u64 rsvd_59_50 : 10; + u64 segs : 2; + u64 seg3_size : 16; + u64 seg2_size : 16; + u64 seg1_size : 16; +#else + u64 seg1_size : 16; + u64 seg2_size : 16; + u64 seg3_size : 16; + u64 segs : 2; + u64 rsvd_59_50 : 10; + u64 subdc : 4; +#endif +}; + +struct nix_send_comp_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 rsvd_24_63 : 40; + u64 sqe_id : 16; + u64 status : 8; +#else + u64 status : 8; + u64 sqe_id : 16; + u64 rsvd_24_63 : 40; +#endif +}; + +/* NIX SQE header structure */ +struct nix_sqe_hdr_s { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 sq : 20; /* W0 */ + u64 pnc : 1; + u64 sizem1 : 3; + u64 aura : 20; + u64 df : 1; + u64 reserved_18 : 1; + u64 total : 18; +#else + u64 total : 18; + u64 reserved_18 : 1; + u64 df : 1; + u64 aura : 20; + u64 sizem1 : 3; + u64 pnc : 1; + u64 sq : 20; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 sqe_id :16; + u64 il4type :4; + u64 il3type :4; + u64 ol4type :4; + u64 ol3type :4; + u64 il4ptr :8; + u64 il3ptr :8; + u64 ol4ptr :8; + u64 ol3ptr :8; +#else + u64 ol3ptr :8; + u64 ol4ptr :8; + u64 il3ptr :8; + u64 il4ptr :8; + u64 ol3type :4; + u64 ol4type :4; + u64 il3type :4; + u64 il4type :4; + u64 sqe_id :16; + +#endif +}; + +/* NIX send extended header subdescriptor structure */ +struct nix_sqe_ext_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 subdc : 4; + u64 mark_en : 1; + u64 markform : 7; + u64 markptr : 8; + u64 shp_ra : 2; + u64 shp_dis : 1; + u64 shp_chg : 9; + u64 
rsvd_31_29 : 3; + u64 lso_format : 5; + u64 lso_sb : 8; + u64 tstmp : 1; + u64 lso : 1; + u64 lso_mps : 14; +#else + u64 lso_mps : 14; + u64 lso : 1; + u64 tstmp : 1; + u64 lso_sb : 8; + u64 lso_format : 5; + u64 rsvd_31_29 : 3; + u64 shp_chg : 9; + u64 shp_dis : 1; + u64 shp_ra : 2; + u64 markptr : 8; + u64 markform : 7; + u64 mark_en : 1; + u64 subdc : 4; +#endif +#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ + u64 rsvd_127_114 : 14; + u64 vlan1_ins_ena : 1; + u64 vlan0_ins_ena : 1; + u64 vlan1_ins_tci : 16; + u64 vlan1_ins_ptr : 8; + u64 vlan0_ins_tci : 16; + u64 vlan0_ins_ptr : 8; +#else + u64 vlan0_ins_ptr : 8; + u64 vlan0_ins_tci : 16; + u64 vlan1_ins_ptr : 8; + u64 vlan1_ins_tci : 16; + u64 vlan0_ins_ena : 1; + u64 vlan1_ins_ena : 1; + u64 rsvd_127_114 : 14; +#endif +}; + +struct nix_sqe_sg_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 subdc : 4; + u64 ld_type : 2; + u64 i3 : 1; + u64 i2 : 1; + u64 i1 : 1; + u64 rsvd_54_50 : 5; + u64 segs : 2; + u64 seg3_size : 16; + u64 seg2_size : 16; + u64 seg1_size : 16; +#else + u64 seg1_size : 16; + u64 seg2_size : 16; + u64 seg3_size : 16; + u64 segs : 2; + u64 rsvd_54_50 : 5; + u64 i1 : 1; + u64 i2 : 1; + u64 i3 : 1; + u64 ld_type : 2; + u64 subdc : 4; +#endif +}; + +/* NIX send memory subdescriptor structure */ +struct nix_sqe_mem_s { +#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ + u64 subdc : 4; + u64 alg : 4; + u64 dsz : 2; + u64 wmem : 1; + u64 rsvd_52_16 : 37; + u64 offset : 16; +#else + u64 offset : 16; + u64 rsvd_52_16 : 37; + u64 wmem : 1; + u64 dsz : 2; + u64 alg : 4; + u64 subdc : 4; +#endif + u64 addr; +}; + +enum nix_cqerrint_e { + NIX_CQERRINT_DOOR_ERR = 0, + NIX_CQERRINT_WR_FULL = 1, + NIX_CQERRINT_CQE_FAULT = 2, +}; + +#define NIX_CQERRINT_BITS (BIT_ULL(NIX_CQERRINT_DOOR_ERR) | \ + BIT_ULL(NIX_CQERRINT_CQE_FAULT)) + +enum nix_rqint_e { + NIX_RQINT_DROP = 0, + NIX_RQINT_RED = 1, +}; + +#define NIX_RQINT_BITS (BIT_ULL(NIX_RQINT_DROP) | BIT_ULL(NIX_RQINT_RED)) + +enum nix_sqint_e { + NIX_SQINT_LMT_ERR = 0, + NIX_SQINT_MNQ_ERR = 1, + NIX_SQINT_SEND_ERR = 2, + NIX_SQINT_SQB_ALLOC_FAIL = 3, +}; + +#define NIX_SQINT_BITS (BIT_ULL(NIX_SQINT_LMT_ERR) | \ + BIT_ULL(NIX_SQINT_MNQ_ERR) | \ + BIT_ULL(NIX_SQINT_SEND_ERR) | \ + BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) + +#endif /* OTX2_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c new file mode 100644 index 000000000000..6864fa315900 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -0,0 +1,1058 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/etherdevice.h> +#include <net/ip.h> +#include <net/tso.h> + +#include "otx2_reg.h" +#include "otx2_common.h" +#include "otx2_struct.h" +#include "otx2_txrx.h" +#include "otx2_ptp.h" + +#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) + +static inline struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq) +{ + struct nix_cqe_hdr_s *cqe_hdr; + + cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head); + if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID) + return NULL; + + cq->cq_head++; + cq->cq_head &= (cq->cqe_cnt - 1); + + return cqe_hdr; +} + +static inline unsigned int frag_num(unsigned int i) +{ +#ifdef __BIG_ENDIAN + return (i & ~3) + 3 - (i & 3); +#else + return i; +#endif +} + +static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len) +{ + const struct skb_frag_struct *frag; + struct page *page; + int offset; + + /* First segment is always skb->data */ + if (!seg) { + page = virt_to_page(skb->data); + offset = offset_in_page(skb->data); + *len = skb_headlen(skb); + } else { + frag = &skb_shinfo(skb)->frags[seg - 1]; + page = skb_frag_page(frag); + offset = frag->page_offset; + *len = skb_frag_size(frag); + } + return otx2_dma_map_page(pfvf, page, offset, *len, + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); +} + +static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) +{ + int seg; + + for (seg = 0; seg < sg->num_segs; seg++) { + otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], + sg->size[seg], DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } + sg->num_segs = 0; +} + +static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, + struct otx2_cq_queue *cq, + struct otx2_snd_queue *sq, + struct nix_cqe_hdr_s *cqe_hdr, + int budget, int *tx_pkts, int *tx_bytes) +{ + struct nix_send_comp_s *snd_comp; + struct sk_buff *skb = NULL; + struct sg_list *sg; + + snd_comp = (struct nix_send_comp_s *) + ((void *)cqe_hdr + sizeof(*cqe_hdr)); + if (unlikely(snd_comp->status)) { + /* tx packet error handling*/ + if (netif_msg_tx_err(pfvf)) { + netdev_info(pfvf->netdev, + "TX%d: Error in send CQ status:%x\n", + cq->cint_idx, snd_comp->status); + } + } + + /* Barrier, so that update to sq by other cpus is visible */ + smp_mb(); + sg = &sq->sg[snd_comp->sqe_id]; + + skb = (struct sk_buff *)sg->skb; + if (unlikely(!skb)) + return; + + if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { + u64 timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id]; + + if (timestamp != 1) { + u64 tsns; + int err; + + err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); + if (!err) { + struct skb_shared_hwtstamps ts; + + memset(&ts, 0, sizeof(ts)); + ts.hwtstamp = ns_to_ktime(tsns); + skb_tstamp_tx(skb, &ts); + } + } + } + + *tx_bytes += skb->len; + (*tx_pkts)++; + otx2_dma_unmap_skb_frags(pfvf, sg); + napi_consume_skb(skb, budget); + sg->skb = (u64)NULL; +} + +static inline void otx2_set_taginfo(struct nix_rx_parse_s *parse, + struct sk_buff *skb) +{ + /* Check if VLAN is present, captured and stripped from packet */ + if (parse->vtag0_valid && parse->vtag0_gone) { + skb_frag_t *frag0 = &skb_shinfo(skb)->frags[0]; + + /* Is the tag captured STAG or CTAG ? 
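+		 * If the ethertype still visible at the head of the fragment
+		 * is 0x8100 (ETH_P_8021Q), the tag HW stripped was the outer
+		 * S-tag of a double-tagged frame, so report it as 802.1AD;
+		 * otherwise it was a plain C-tag.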
*/ + if (((struct ethhdr *)skb_frag_address(frag0))->h_proto == + htons(ETH_P_8021Q)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + parse->vtag0_tci); + else + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + parse->vtag0_tci); + } +} + +static inline void otx2_set_rxhash(struct otx2_nic *pfvf, + struct nix_cqe_hdr_s *cqe_hdr, + struct sk_buff *skb) +{ + enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; + struct otx2_rss_info *rss; + u32 hash = 0; + + if (!(pfvf->netdev->features & NETIF_F_RXHASH)) + return; + + rss = &pfvf->hw.rss_info; + if (rss->flowkey_cfg) { + if (rss->flowkey_cfg & + ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)) + hash_type = PKT_HASH_TYPE_L4; + else + hash_type = PKT_HASH_TYPE_L3; + hash = cqe_hdr->flow_tag; + } + skb_set_hash(skb, hash, hash_type); +} + +static inline void otx2_set_rxtstamp(struct otx2_nic *pfvf, + struct sk_buff *skb, void *data) +{ + u64 tsns; + int err; + + if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) + return; + + /* The first 8 bytes is the timestamp */ + err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(u64 *)data), &tsns); + if (err) + return; + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns); +} + +static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, + u64 iova, int len, struct nix_rx_parse_s *parse) +{ + struct page *page; + int off = 0; + void *va; + + va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova)); + + if (likely(!skb_shinfo(skb)->nr_frags)) { + /* Check if data starts at some nonzero offset + * from the start of the buffer. For now the + * only possible offset is 8 bytes in the case + * where packet is prepended by a timestamp. + */ + if (parse->laptr) { + otx2_set_rxtstamp(pfvf, skb, va); + off = 8; + } + off += pfvf->xtra_hdr; + } + + page = virt_to_page(va); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + va - page_address(page) + off, len - off, RCV_FRAG_LEN); + + otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); +} + +static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf, + struct nix_rx_parse_s *parse, int qidx) +{ + struct otx2_drv_stats *stats = &pfvf->hw.drv_stats; + struct nix_rx_sg_s *sg; + void *start, *end; + u64 *iova; + int seg; + + if (netif_msg_rx_err(pfvf)) + netdev_err(pfvf->netdev, + "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n", + qidx, parse->errlev, parse->errcode); + + if (parse->errlev == NPC_ERRLVL_RE) { + switch (parse->errcode) { + case ERRCODE_FCS: + case ERRCODE_FCS_RCV: + atomic_inc(&stats->rx_fcs_errs); + break; + case ERRCODE_UNDERSIZE: + atomic_inc(&stats->rx_undersize_errs); + break; + case ERRCODE_OVERSIZE: + atomic_inc(&stats->rx_oversize_errs); + break; + case ERRCODE_OL2_LEN_MISMATCH: + atomic_inc(&stats->rx_len_errs); + break; + default: + atomic_inc(&stats->rx_other_errs); + break; + } + } else if (parse->errlev == NPC_ERRLVL_NIX) { + switch (parse->errcode) { + case ERRCODE_OL3_LEN: + case ERRCODE_OL4_LEN: + case ERRCODE_IL3_LEN: + case ERRCODE_IL4_LEN: + atomic_inc(&stats->rx_len_errs); + break; + case ERRCODE_OL4_CSUM: + case ERRCODE_IL4_CSUM: + atomic_inc(&stats->rx_csum_errs); + break; + default: + atomic_inc(&stats->rx_other_errs); + break; + } + } else { + atomic_inc(&stats->rx_other_errs); + /* For now ignore all the NPC parser errors and + * pass the packets to stack. 
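+		 * Returning false tells the caller to process the CQE
+		 * normally; returning true (the fall-through path below)
+		 * means the segment buffers were already freed back to the
+		 * pool here and the packet must be dropped.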
+ */ + return false; + } + + start = (void *)parse + sizeof(*parse); + end = start + ((parse->desc_sizem1 + 1) * 16); + while ((start + sizeof(*sg)) < end) { + sg = (struct nix_rx_sg_s *)start; + iova = (void *)sg + sizeof(*sg); + + /* If RXALL is enabled pass on packets to stack */ + if (sg->segs && pfvf->netdev->features & NETIF_F_RXALL) + return false; + + for (seg = 0; seg < sg->segs; seg++) { + otx2_aura_freeptr(pfvf, qidx, *iova & ~0x07ULL); + iova++; + } + if (sg->segs == 1) + start += sizeof(*sg) + sizeof(u64); + else + start += sizeof(*sg) + (3 * sizeof(u64)); + } + return true; +} + +static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, + struct napi_struct *napi, + struct otx2_cq_queue *cq, + struct nix_cqe_hdr_s *cqe_hdr) +{ + struct nix_rx_parse_s *parse; + struct sk_buff *skb = NULL; + struct nix_rx_sg_s *sg; + void *start, *end; + int seg, len; + u16 *sg_lens; + u64 *iova; + + /* CQE_HDR_S for a Rx pkt is always followed by RX_PARSE_S */ + parse = (struct nix_rx_parse_s *)((void *)cqe_hdr + sizeof(*cqe_hdr)); + if (unlikely(parse->errlev || parse->errcode)) { + if (otx2_check_rcv_errors(pfvf, parse, cq->cq_idx)) + return; + } + + start = (void *)parse + sizeof(*parse); + end = start + ((parse->desc_sizem1 + 1) * 16); + + skb = napi_get_frags(napi); + if (unlikely(!skb)) + return; + + /* Run through the each NIX_RX_SG_S subdc and frame the skb */ + while ((start + sizeof(*sg)) < end) { + sg = (struct nix_rx_sg_s *)start; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + + for (seg = 0; seg < sg->segs; seg++) { + len = sg_lens[frag_num(seg)]; + otx2_skb_add_frag(pfvf, skb, *iova, len, parse); + iova++; + } + cq->pool_ptrs += sg->segs; + + /* When SEGS = 1, only one IOVA is followed by NIX_RX_SG_S. + * When SEGS >= 2, three IOVAs will follow NIX_RX_SG_S, + * irrespective of whether 2 SEGS are valid or all 3. 
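+		 * So a one-segment subdescriptor occupies 16 bytes (the
+		 * 8-byte SG_S plus one IOVA) while a two or three segment
+		 * one occupies 32 bytes (SG_S plus three IOVAs), which is
+		 * exactly what the pointer math below advances by.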
+ */ + if (sg->segs == 1) + start += sizeof(*sg) + sizeof(u64); + else + start += sizeof(*sg) + (3 * sizeof(u64)); + } + + otx2_set_rxhash(pfvf, cqe_hdr, skb); + + skb_record_rx_queue(skb, cq->cq_idx); + if (pfvf->netdev->features & NETIF_F_RXCSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + otx2_set_taginfo(parse, skb); + + napi_gro_frags(napi); +} + +static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf, + struct napi_struct *napi, + struct otx2_cq_queue *cq, int budget) +{ + struct otx2_pool *rbpool = cq->rbpool; + struct nix_cqe_hdr_s *cqe_hdr; + int processed_cqe = 0; + s64 bufptr; + + /* Make sure HW writes to CQ are done */ + dma_rmb(); + while (likely(processed_cqe < budget)) { + cqe_hdr = otx2_get_next_cqe(cq); + if (unlikely(!cqe_hdr)) { + if (!processed_cqe) + return 0; + break; + } + otx2_rcv_pkt_handler(pfvf, napi, cq, cqe_hdr); + + cqe_hdr->cqe_type = NIX_XQE_TYPE_INVALID; + processed_cqe++; + } + + /* Free CQEs to HW */ + otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, + ((u64)cq->cq_idx << 32) | processed_cqe); + + if (unlikely(!cq->pool_ptrs)) + return 0; + + /* Refill pool with new buffers */ + while (cq->pool_ptrs) { + bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_ATOMIC); + if (unlikely(bufptr <= 0)) { + struct refill_work *work; + struct delayed_work *dwork; + + work = &pfvf->refill_wrk[cq->cq_idx]; + dwork = &work->pool_refill_work; + /* Schedule a task if no other task is running */ + if (!cq->refill_task_sched) { + cq->refill_task_sched = true; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } + break; + } + otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); + cq->pool_ptrs--; + } + otx2_get_page(rbpool); + + return processed_cqe; +} + +static inline int otx2_tx_napi_handler(struct otx2_nic *pfvf, + struct otx2_cq_queue *cq, int budget) +{ + struct nix_cqe_hdr_s *cqe_hdr; + int tx_pkts = 0, tx_bytes = 0; + struct otx2_snd_queue *sq; + struct netdev_queue *txq; + int processed_cqe = 0; + + sq = &pfvf->qset.sq[cq->cint_idx]; + + /* Make sure HW writes to CQ are done */ + dma_rmb(); + while (likely(processed_cqe < budget)) { + cqe_hdr = otx2_get_next_cqe(cq); + if (unlikely(!cqe_hdr)) { + if (!processed_cqe) + return 0; + break; + } + otx2_snd_pkt_handler(pfvf, cq, sq, cqe_hdr, budget, + &tx_pkts, &tx_bytes); + + cqe_hdr->cqe_type = NIX_XQE_TYPE_INVALID; + processed_cqe++; + } + + /* Free CQEs to HW */ + otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, + ((u64)cq->cq_idx << 32) | processed_cqe); + + if (likely(tx_pkts)) { + txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx); + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); + /* Check if queue was stopped earlier due to ring full */ + smp_mb(); + if (netif_tx_queue_stopped(txq) && + netif_carrier_ok(pfvf->netdev)) + netif_tx_wake_queue(txq); + } + return 0; +} + +int otx2_napi_handler(struct napi_struct *napi, int budget) +{ + struct otx2_cq_poll *cq_poll; + int workdone = 0, cq_idx, i; + struct otx2_cq_queue *cq; + struct otx2_qset *qset; + struct otx2_nic *pfvf; + + cq_poll = container_of(napi, struct otx2_cq_poll, napi); + pfvf = (struct otx2_nic *)cq_poll->dev; + qset = &pfvf->qset; + + for (i = CQS_PER_CINT - 1; i >= 0; i--) { + cq_idx = cq_poll->cq_ids[i]; + if (unlikely(cq_idx == CINT_INVALID_CQ)) + continue; + cq = &qset->cq[cq_idx]; + if (cq->cq_type == CQ_RX) { + /* If the RQ refill WQ task is running, skip napi + * scheduler for this queue. 
+ */ + if (cq->refill_task_sched) + continue; + workdone += otx2_rx_napi_handler(pfvf, napi, + cq, budget); + } else { + workdone += otx2_tx_napi_handler(pfvf, cq, budget); + } + } + + /* Clear the IRQ */ + otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); + + if (workdone < budget && napi_complete_done(napi, workdone)) { + /* If interface is going down, don't re-enable IRQ */ + if (pfvf->flags & OTX2_FLAG_INTF_DOWN) + return workdone; + + /* Re-enable interrupts */ + otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), + BIT_ULL(0)); + } + return workdone; +} + +static inline void otx2_sqe_flush(struct otx2_snd_queue *sq, int size) +{ + u64 status; + + /* Packet data stores should finish before SQE is flushed to HW */ + dma_wmb(); + + do { + memcpy(sq->lmt_addr, sq->sqe_base, size); + status = otx2_lmt_flush(sq->io_addr); + } while (status == 0); + + sq->head++; + sq->head &= (sq->sqe_cnt - 1); +} + +#define MAX_SEGS_PER_SG 3 +/* Add SQE scatter/gather subdescriptor structure */ +static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + struct nix_sqe_sg_s *sg = NULL; + u64 dma_addr, *iova = NULL; + u16 *sg_lens = NULL; + int seg, len; + + sq->sg[sq->head].num_segs = 0; + + for (seg = 0; seg < num_segs; seg++) { + if ((seg % MAX_SEGS_PER_SG) == 0) { + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 0; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + /* Next subdc always starts at a 16byte boundary. + * So if sg->segs is whether 2 or 3, offset += 16bytes. + */ + if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) + *offset += sizeof(*sg) + (3 * sizeof(u64)); + else + *offset += sizeof(*sg) + sizeof(u64); + } + dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); + if (dma_mapping_error(pfvf->dev, dma_addr)) + return false; + + sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len; + sg->segs++; + *iova++ = dma_addr; + + /* Save DMA mapping info for later unmapping */ + sq->sg[sq->head].dma_addr[seg] = dma_addr; + sq->sg[sq->head].size[seg] = len; + sq->sg[sq->head].num_segs++; + } + + sq->sg[sq->head].skb = (u64)skb; + return true; +} + +/* Add SQE extended header subdescriptor */ +static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int *offset) +{ + struct nix_sqe_ext_s *ext; + + ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset); + ext->subdc = NIX_SUBDC_EXT; + if (skb_shinfo(skb)->gso_size) { + ext->lso = 1; + ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb); + ext->lso_mps = skb_shinfo(skb)->gso_size; + + /* Only TSOv4 and TSOv6 GSO offloads are supported */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { + ext->lso_format = pfvf->hw.lso_tsov4_idx; + + /* HW adds payload size to 'ip_hdr->tot_len' while + * sending TSO segment, hence set payload length + * in IP header of the packet to just header length. 
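+			 * e.g. with a 20 byte IP header and 32 bytes of TCP
+			 * header plus options, lso_sb - skb_network_offset()
+			 * is 52, so tot_len is written as 52 and HW adds each
+			 * segment's payload size on top of it.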
+			 */
+			ip_hdr(skb)->tot_len =
+				htons(ext->lso_sb - skb_network_offset(skb));
+		} else {
+			ext->lso_format = pfvf->hw.lso_tsov6_idx;
+			ipv6_hdr(skb)->payload_len =
+				htons(ext->lso_sb - skb_network_offset(skb));
+		}
+	} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+		ext->tstmp = 1;
+	}
+
+#define OTX2_VLAN_PTR_OFFSET     (ETH_HLEN - ETH_TLEN)
+	if (skb_vlan_tag_present(skb)) {
+		if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+			ext->vlan1_ins_ena = 1;
+			ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+			ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
+		} else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+			ext->vlan0_ins_ena = 1;
+			ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+			ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
+		}
+	}
+
+	*offset += sizeof(*ext);
+}
+
+static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+			     int alg, u64 iova)
+{
+	struct nix_sqe_mem_s *mem;
+
+	mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
+	mem->subdc = NIX_SUBDC_MEM;
+	mem->alg = alg;
+	mem->wmem = 1; /* wait for the memory operation */
+	mem->addr = iova;
+
+	*offset += sizeof(*mem);
+}
+
+/* Add SQE header subdescriptor structure */
+static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+			     struct nix_sqe_hdr_s *sqe_hdr,
+			     struct sk_buff *skb, u16 qidx)
+{
+	int proto = 0;
+
+	/* Check if SQE was framed before, if yes then no need to
+	 * set these constants again and again.
+	 */
+	if (!sqe_hdr->total) {
+		/* Don't free Tx buffers to Aura */
+		sqe_hdr->df = 1;
+		sqe_hdr->aura = sq->aura_id;
+		/* Post a CQE Tx after pkt transmission */
+		sqe_hdr->pnc = 1;
+		sqe_hdr->sq = qidx;
+	}
+	sqe_hdr->total = skb->len;
+	/* Set SQE identifier which will be used later for freeing SKB */
+	sqe_hdr->sqe_id = sq->head;
+
+	/* Offload TCP/UDP checksum to HW */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		sqe_hdr->ol3ptr = skb_network_offset(skb);
+		sqe_hdr->ol4ptr = skb_transport_offset(skb);
+		/* get vlan protocol Ethertype */
+		if (eth_type_vlan(skb->protocol))
+			skb->protocol = vlan_get_protocol(skb);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			proto = ip_hdr(skb)->protocol;
+			/* In case of TSO, HW needs this to be explicitly set.
+			 * So set this always, instead of adding a check.
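+			 * (The IP4_CKSUM variant also makes HW regenerate the
+			 * IPv4 header checksum, which is needed once it
+			 * rewrites tot_len for every TSO segment.)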
+ */ + sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + proto = ipv6_hdr(skb)->nexthdr; + } + + if (proto == IPPROTO_TCP) + sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM; + else if (proto == IPPROTO_UDP) + sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM; + } +} + +static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf, + struct otx2_snd_queue *sq, + struct sk_buff *skb, int sqe, int hdr_len) +{ + int num_segs = skb_shinfo(skb)->nr_frags + 1; + struct sg_list *sg = &sq->sg[sqe]; + u64 dma_addr; + int seg, len; + + sg->num_segs = 0; + + /* Get payload length at skb->data */ + len = skb_headlen(skb) - hdr_len; + + for (seg = 0; seg < num_segs; seg++) { + /* Skip skb->data, if there is no payload */ + if (!seg && !len) + continue; + dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); + if (dma_mapping_error(pfvf->dev, dma_addr)) + goto unmap; + + /* Save DMA mapping info for later unmapping */ + sg->dma_addr[sg->num_segs] = dma_addr; + sg->size[sg->num_segs] = len; + sg->num_segs++; + } + return 0; +unmap: + otx2_dma_unmap_skb_frags(pfvf, sg); + return -EINVAL; +} + +static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq, + struct sk_buff *skb, int seg, + u64 seg_addr, int hdr_len, int sqe) +{ + const struct skb_frag_struct *frag; + struct sg_list *sg = &sq->sg[sqe]; + int offset; + + if (seg < 0) + return sg->dma_addr[0] + (seg_addr - (u64)skb->data); + + frag = &skb_shinfo(skb)->frags[seg]; + offset = (u64)(page_address(frag->page.p) + frag->page_offset); + offset = seg_addr - offset; + if (skb_headlen(skb) - hdr_len) + seg++; + return sg->dma_addr[seg] + offset; +} + +static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq, + struct sg_list *list, int *offset) +{ + struct nix_sqe_sg_s *sg = NULL; + u16 *sg_lens = NULL; + u64 *iova = NULL; + int seg; + + /* Add SG descriptors with buffer addresses */ + for (seg = 0; seg < list->num_segs; seg++) { + if ((seg % MAX_SEGS_PER_SG) == 0) { + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 0; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + /* Next subdc always starts at a 16byte boundary. + * So if sg->segs is whether 2 or 3, offset += 16bytes. + */ + if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) + *offset += sizeof(*sg) + (3 * sizeof(u64)); + else + *offset += sizeof(*sg) + sizeof(u64); + } + sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg]; + *iova++ = list->dma_addr[seg]; + sg->segs++; + } +} + +static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, u16 qidx) +{ + struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int tcp_data, seg_len, pkt_len, offset; + struct nix_sqe_hdr_s *sqe_hdr; + int first_sqe = sq->head; + struct sg_list list; + struct tso_t tso; + + /* Map SKB's fragments to DMA. + * It's done here to avoid mapping for every TSO segment's packet. 
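+	 * Each segment built below then reuses these mappings via
+	 * otx2_tso_frag_dma_addr(), which converts a fragment address into
+	 * an offset inside the already-mapped region.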
+
+static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+			       struct sk_buff *skb, u16 qidx)
+{
+	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int tcp_data, seg_len, pkt_len, offset;
+	struct nix_sqe_hdr_s *sqe_hdr;
+	int first_sqe = sq->head;
+	struct sg_list list;
+	struct tso_t tso;
+
+	/* Map SKB's fragments to DMA.
+	 * It's done here to avoid mapping for every TSO segment's packet.
+	 */
+	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	netdev_tx_sent_queue(txq, skb->len);
+
+	tso_start(skb, &tso);
+	tcp_data = skb->len - hdr_len;
+	while (tcp_data > 0) {
+		char *hdr;
+
+		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
+		tcp_data -= seg_len;
+
+		/* Set SQE's SEND_HDR */
+		memset(sq->sqe_base, 0, sq->sqe_size);
+		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+		offset = sizeof(*sqe_hdr);
+
+		/* Add TSO segment's pkt header */
+		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
+		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
+		list.dma_addr[0] =
+			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
+		list.size[0] = hdr_len;
+		list.num_segs = 1;
+
+		/* Add TSO segment's payload data fragments */
+		pkt_len = hdr_len;
+		while (seg_len > 0) {
+			int size;
+
+			size = min_t(int, tso.size, seg_len);
+
+			list.size[list.num_segs] = size;
+			list.dma_addr[list.num_segs] =
+				otx2_tso_frag_dma_addr(sq, skb,
+						       tso.next_frag_idx - 1,
+						       (u64)tso.data, hdr_len,
+						       first_sqe);
+			list.num_segs++;
+			pkt_len += size;
+			seg_len -= size;
+			tso_build_data(skb, &tso, size);
+		}
+		sqe_hdr->total = pkt_len;
+		otx2_sqe_tso_add_sg(sq, &list, &offset);
+
+		/* DMA mappings and skb need to be freed only after the last
+		 * TSO segment is transmitted. So set 'PNC' only for the
+		 * last segment. Also point the last segment's sqe_id to the
+		 * first segment's SQE index, where the skb address and DMA
+		 * mappings are saved.
+		 */
+		if (!tcp_data) {
+			sqe_hdr->pnc = 1;
+			sqe_hdr->sqe_id = first_sqe;
+			sq->sg[first_sqe].skb = (u64)skb;
+		} else {
+			sqe_hdr->pnc = 0;
+		}
+
+		sqe_hdr->sizem1 = (offset / 16) - 1;
+
+		/* Flush SQE to HW */
+		otx2_sqe_flush(sq, offset);
+	}
+}
+
+static inline bool is_hw_tso_supported(struct otx2_nic *pfvf,
+				       struct sk_buff *skb)
+{
+	int payload_len, last_seg_size;
+
+	if (!pfvf->hw.hw_tso)
+		return false;
+
+	/* HW has an issue due to which, when the payload of the last LSO
+	 * segment is shorter than 16 bytes, some header fields may not
+	 * be correctly modified, hence don't offload such TSO segments.
+	 */
+	payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
+	if (last_seg_size && last_seg_size < 16)
+		return false;
+	return true;
+}
+
+static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
+{
+	if (!skb_shinfo(skb)->gso_size)
+		return 1;
+
+	/* HW TSO */
+	if (is_hw_tso_supported(pfvf, skb))
+		return 1;
+
+	/* SW TSO */
+	return skb_shinfo(skb)->gso_segs;
+}
+
+static inline void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+				     struct otx2_snd_queue *sq, int *offset)
+{
+	u64 iova;
+
+	if (!skb_shinfo(skb)->gso_size &&
+	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
+		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+	} else {
+		skb_tx_timestamp(skb);
+	}
+}
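As a worked example of the last-segment rule in is_hw_tso_supported() (illustrative numbers only): with a 54-byte Ethernet+IP+TCP header and gso_size = 1400, an skb of 2860 bytes leaves (2860 - 54) % 1400 = 6 bytes for the final segment; since 6 < 16, the function returns false and the skb is segmented in software via otx2_sq_append_tso(). A 3000-byte skb would leave 2946 % 1400 = 146 bytes and stay on the HW TSO path.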
+
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+			struct sk_buff *skb, u16 qidx)
+{
+	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
+	struct otx2_nic *pfvf = netdev_priv(netdev);
+	int offset, num_segs, free_sqe;
+	struct nix_sqe_hdr_s *sqe_hdr;
+
+	/* Check if there is room for a new SQE.
+	 * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
+	 * will give the free SQE count.
+	 */
+	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+
+	if (free_sqe < sq->sqe_thresh ||
+	    free_sqe < otx2_get_sqe_count(pfvf, skb))
+		return false;
+
+	num_segs = skb_shinfo(skb)->nr_frags + 1;
+
+	/* If SKB doesn't fit in a single SQE, linearize it.
+	 * TODO: Consider adding a JUMP descriptor instead.
+	 */
+	if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
+		if (__skb_linearize(skb)) {
+			dev_kfree_skb_any(skb);
+			return true;
+		}
+		num_segs = skb_shinfo(skb)->nr_frags + 1;
+	}
+
+	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+		/* Insert VLAN tag before handing the pkt to SW TSO */
+		if (skb_vlan_tag_present(skb)) {
+			skb = __vlan_hwaccel_push_inside(skb);
+			if (!skb)
+				return true;
+		}
+		otx2_sq_append_tso(pfvf, sq, skb, qidx);
+		return true;
+	}
+
+	/* Set SQE's SEND_HDR.
+	 * Do not clear the first 64 bits, as they contain constant info.
+	 */
+	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+	otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+	offset = sizeof(*sqe_hdr);
+
+	/* Add extended header if needed */
+	otx2_sqe_add_ext(pfvf, sq, skb, &offset);
+
+	/* Add SG subdesc with data frags */
+	if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+		otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
+		return false;
+	}
+
+	otx2_set_txtstamp(pfvf, skb, sq, &offset);
+
+	sqe_hdr->sizem1 = (offset / 16) - 1;
+
+	netdev_tx_sent_queue(txq, skb->len);
+
+	/* Flush SQE to HW */
+	otx2_sqe_flush(sq, offset);
+
+	return true;
+}
+EXPORT_SYMBOL(otx2_sq_append_skb);
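To make the occupancy check above concrete (numbers are made up): if the SQ owns num_sqbs = 16 send queue buffers of sqe_per_sqb = 31 SQEs each, and the Aura's free-count register at *sq->aura_fc_addr reports 4 SQBs still held by hardware, then (16 - 4) * 31 = 372 SQEs are treated as free. The skb is accepted only if this exceeds both sqe_thresh and the count returned by otx2_get_sqe_count(), which can be up to gso_segs SQEs on the SW TSO path.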
+
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+	struct nix_cqe_hdr_s *cqe_hdr;
+	struct nix_rx_parse_s *parse;
+	struct nix_rx_sg_s *sg;
+	int processed_cqe = 0;
+	void *start, *end;
+	u64 *iova, pa;
+	int seg;
+
+	/* Make sure HW writes to CQ are done */
+	dma_rmb();
+	while ((cqe_hdr = otx2_get_next_cqe(cq))) {
+		parse = (struct nix_rx_parse_s *)
+			((void *)cqe_hdr + sizeof(*cqe_hdr));
+		start = (void *)parse + sizeof(*parse);
+		end = start + ((parse->desc_sizem1 + 1) * 16);
+		while ((start + sizeof(*sg)) < end) {
+			sg = (struct nix_rx_sg_s *)start;
+			iova = (void *)sg + sizeof(*sg);
+			for (seg = 0; seg < sg->segs; seg++) {
+				/* Free IOVA */
+				*iova -= OTX2_HEAD_ROOM;
+				pa = otx2_iova_to_phys(pfvf->iommu_domain,
+						       *iova);
+				otx2_dma_unmap_page(pfvf, *iova,
+						    RCV_FRAG_LEN,
+						    DMA_FROM_DEVICE,
+						    DMA_ATTR_SKIP_CPU_SYNC);
+				put_page(virt_to_page(phys_to_virt(pa)));
+				iova++;
+			}
+			start += sizeof(*sg);
+			start += (sg->segs == 1) ?
+				 sizeof(u64) : 3 * sizeof(u64);
+		}
+		cqe_hdr->cqe_type = NIX_XQE_TYPE_INVALID;
+		processed_cqe++;
+	}
+
+	/* Free CQEs to HW */
+	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+		     ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+	struct nix_send_comp_s *snd_comp;
+	struct nix_cqe_hdr_s *cqe_hdr;
+	struct sk_buff *skb = NULL;
+	struct otx2_snd_queue *sq;
+	int processed_cqe = 0;
+	struct sg_list *sg;
+
+	sq = &pfvf->qset.sq[cq->cint_idx];
+
+	/* Make sure HW writes to CQ are done */
+	dma_rmb();
+	while ((cqe_hdr = otx2_get_next_cqe(cq))) {
+		snd_comp = (struct nix_send_comp_s *)
+			((void *)cqe_hdr + sizeof(*cqe_hdr));
+		sg = &sq->sg[snd_comp->sqe_id];
+		skb = (struct sk_buff *)sg->skb;
+		if (skb) {
+			otx2_dma_unmap_skb_frags(pfvf, sg);
+			dev_kfree_skb_any(skb);
+			sg->skb = (u64)NULL;
+		}
+
+		cqe_hdr->cqe_type = NIX_XQE_TYPE_INVALID;
+		processed_cqe++;
+	}
+
+	/* Free CQEs to HW */
+	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+		     ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+{
+	struct msg_req *msg;
+	int err;
+
+	otx2_mbox_lock(&pfvf->mbox);
+	if (enable)
+		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
+	else
+		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
+
+	if (!msg) {
+		otx2_mbox_unlock(&pfvf->mbox);
+		return -ENOMEM;
+	}
+
+	err = otx2_sync_mbox_msg(&pfvf->mbox);
+	otx2_mbox_unlock(&pfvf->mbox);
+	return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
new file mode 100644
index 000000000000..b898693ac08d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_TXRX_H
+#define OTX2_TXRX_H
+
+#include <linux/etherdevice.h>
+#include <linux/iommu.h>
+#include <linux/if_vlan.h>
+
+#define LBK_CHAN_BASE	0x000
+#define SDP_CHAN_BASE	0x700
+#define CGX_CHAN_BASE	0x800
+
+#define DMA_BUFFER_LEN	1536 /* In multiples of 128 bytes */
+#define OTX2_DATA_ALIGN(X)	ALIGN(X, OTX2_ALIGN)
+#define RCV_FRAG_LEN1 \
+		((OTX2_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD)) + \
+		(OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
+/* Prefer 2048 byte buffers for better last level cache
+ * utilization or data distribution across regions.
+ */
+#define RCV_FRAG_LEN	((RCV_FRAG_LEN1 < 2048) ? 2048 : RCV_FRAG_LEN1)
+
+#define OTX2_HEAD_ROOM	OTX2_ALIGN
+
+#define OTX2_ETH_HLEN	(VLAN_ETH_HLEN + VLAN_HLEN)
+#define OTX2_MIN_MTU	64
+#define OTX2_MAX_MTU	(9212 - OTX2_ETH_HLEN)
+
+#define OTX2_MAX_GSO_SEGS	255
+#define OTX2_MAX_FRAGS_IN_SQE	9
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is equal to this value.
+ */
+#define CQ_CQE_THRESH_DEFAULT	10
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is nonzero and this much time elapses after that.
+ */ +#define CQ_TIMER_THRESH_DEFAULT 1 /* 1 usec */ +#define CQ_TIMER_THRESH_MAX 25 /* 25 usec */ +#define CQ_QCOUNT_DEFAULT 1 + +struct queue_stats { + u64 bytes; + u64 pkts; +}; + +struct otx2_rcv_queue { + struct queue_stats stats; +}; + +struct sg_list { + u16 num_segs; + u64 skb; + u64 size[OTX2_MAX_FRAGS_IN_SQE]; + u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE]; +}; + +struct otx2_snd_queue { + u8 aura_id; + u16 head; + u16 sqe_size; + u32 sqe_cnt; + u16 num_sqbs; + u16 sqe_thresh; + u8 sqe_per_sqb; + u64 io_addr; + u64 *aura_fc_addr; + u64 *lmt_addr; + void *sqe_base; + struct qmem *sqe; + struct qmem *tso_hdrs; + struct sg_list *sg; + struct qmem *timestamps; + struct queue_stats stats; + u16 sqb_count; + u64 *sqb_ptrs; +} ____cacheline_aligned_in_smp; + +enum cq_type { + CQ_RX, + CQ_TX, + CQS_PER_CINT = 2, /* RQ + SQ */ +}; + +struct otx2_cq_poll { + void *dev; +#define CINT_INVALID_CQ 255 + u8 cint_idx; + u8 cq_ids[CQS_PER_CINT]; + struct napi_struct napi; +}; + +struct otx2_pool { + struct qmem *stack; + struct qmem *fc_addr; + u16 rbsize; + u32 page_offset; + u16 pageref; + struct page *page; +}; + +#define CQ_OP_ERROR BIT_ULL(63) +#define CQ_CQ_ERROR BIT_ULL(46) + +struct otx2_cq_queue { + u8 cq_idx; + u8 cq_type; + u8 cint_idx; /* CQ interrupt id */ + u8 refill_task_sched; + u16 cqe_size; + u16 pool_ptrs; + u32 cqe_cnt; + u32 cq_head; + void *cqe_base; + struct qmem *cqe; + struct otx2_pool *rbpool; +} ____cacheline_aligned_in_smp; + +struct otx2_qset { + u32 rqe_cnt; + u32 sqe_cnt; /* Keep these two at top */ +#define OTX2_MAX_CQ_CNT 64 + u16 cq_cnt; + u16 xqe_size; + struct otx2_pool *pool; + struct otx2_cq_poll *napi; + struct otx2_cq_queue *cq; + struct otx2_snd_queue *sq; + struct otx2_rcv_queue *rq; +}; + +/* Translate IOVA to physical address */ +static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr) +{ + /* Translation is installed only when IOMMU is present */ + if (likely(iommu_domain)) + return iommu_iova_to_phys(iommu_domain, dma_addr); + return dma_addr; +} + +int otx2_napi_handler(struct napi_struct *napi, int budget); +bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, + struct sk_buff *skb, u16 qidx); +#endif /* OTX2_TXRX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c new file mode 100644 index 000000000000..218ce0382adb --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Virtual Function ethernet driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+
+#define DRV_NAME	"octeontx2-nicvf"
+#define DRV_STRING	"Marvell OcteonTX2 NIC Virtual Function Driver"
+#define DRV_VERSION	"1.0"
+
+static const struct pci_device_id otx2_vf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+	{ }
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);
+
+/* RVU VF Interrupt Vector Enumeration */
+enum {
+	RVU_VF_INT_VEC_MBOX = 0x0,
+};
+
+static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	bool if_up = netif_running(netdev);
+	int err = 0;
+
+	if (if_up)
+		otx2vf_stop(netdev);
+
+	netdev_info(netdev, "Changing MTU from %d to %d\n",
+		    netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
+
+	if (if_up)
+		err = otx2vf_open(netdev);
+
+	return err;
+}
+
+static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
+					 struct mbox_msghdr *msg)
+{
+	if (msg->id >= MBOX_MSG_MAX) {
+		dev_err(vf->dev,
+			"Mbox msg with unknown ID %d\n", msg->id);
+		return;
+	}
+
+	if (msg->sig != OTX2_MBOX_RSP_SIG) {
+		dev_err(vf->dev,
+			"Mbox msg with wrong signature %x, ID %d\n",
+			msg->sig, msg->id);
+		return;
+	}
+
+	if (msg->rc == MBOX_MSG_INVALID) {
+		dev_err(vf->dev,
+			"PF/AF says the sent msg(s) %d were invalid\n",
+			msg->id);
+		return;
+	}
+
+	switch (msg->id) {
+	case MBOX_MSG_READY:
+		vf->pcifunc = msg->pcifunc;
+		break;
+	case MBOX_MSG_MSIX_OFFSET:
+		mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
+		break;
+	case MBOX_MSG_NPA_LF_ALLOC:
+		mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
+		break;
+	case MBOX_MSG_NIX_LF_ALLOC:
+		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
+		break;
+	case MBOX_MSG_NIX_TXSCH_ALLOC:
+		mbox_handler_nix_txsch_alloc(vf,
+					     (struct nix_txsch_alloc_rsp *)msg);
+		break;
+	case MBOX_MSG_NIX_BP_ENABLE:
+		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
+		break;
+	default:
+		if (msg->rc)
+			dev_err(vf->dev,
+				"Mbox msg response has err %d, ID %d\n",
+				msg->rc, msg->id);
+	}
+}
+
+static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
+{
+	struct otx2_mbox_dev *mdev;
+	struct mbox_hdr *rsp_hdr;
+	struct mbox_msghdr *msg;
+	struct otx2_mbox *mbox;
+	struct mbox *af_mbox;
+	int offset, id;
+
+	af_mbox = container_of(work, struct mbox, mbox_wrk);
+	mbox = &af_mbox->mbox;
+	mdev = &mbox->dev[0];
+	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+	if (af_mbox->num_msgs == 0)
+		return;
+	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+	for (id = 0; id < af_mbox->num_msgs; id++) {
+		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
+		offset = mbox->rx_start + msg->next_msgoff;
+		mdev->msgs_acked++;
+	}
+
+	otx2_mbox_reset(mbox, 0);
+}
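For context, a minimal sketch of the sending side that the handler above completes: the VF allocates a request under the mailbox lock, rings the doorbell, and sleeps until the response is acked. The pattern mirrors otx2_update_lmac_stats() in otx2_common.c; otx2vf_ping_af() is a hypothetical name used here for illustration only:

static int otx2vf_ping_af(struct otx2_nic *vf)
{
	struct msg_req *req;
	int err;

	otx2_mbox_lock(&vf->mbox);
	req = otx2_mbox_alloc_msg_ready(&vf->mbox);	/* MBOX_MSG_READY */
	if (!req) {
		otx2_mbox_unlock(&vf->mbox);
		return -ENOMEM;
	}
	/* Rings the PF doorbell; the response lands in
	 * otx2vf_process_vfaf_mbox_msg() via the workqueue.
	 */
	err = otx2_sync_mbox_msg(&vf->mbox);
	otx2_mbox_unlock(&vf->mbox);
	return err;
}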
+
+static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
+				      struct mbox_msghdr *req)
+{
+	/* Check if valid, if not reply with an invalid msg */
+	if (req->sig != OTX2_MBOX_REQ_SIG) {
+		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+		return -ENODEV;
+	}
+
+	switch (req->id) {
+#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
+	case _id: {							\
+		struct _rsp_type *rsp;					\
+		int err;						\
+									\
+		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
+			&vf->mbox.mbox_up, 0,				\
+			sizeof(struct _rsp_type));			\
+		if (!rsp)						\
+			return -ENOMEM;					\
+									\
+		rsp->hdr.id = _id;					\
+		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
+		rsp->hdr.pcifunc = 0;					\
+		rsp->hdr.rc = 0;					\
+									\
+		err = otx2_mbox_up_handler_ ## _fn_name(		\
+			vf, (struct _req_type *)req, rsp);		\
+		return err;						\
+	}
+MBOX_UP_CGX_MESSAGES
+#undef M
+		break;
+	default:
+		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+{
+	struct otx2_mbox_dev *mdev;
+	struct mbox_hdr *rsp_hdr;
+	struct mbox_msghdr *msg;
+	struct otx2_mbox *mbox;
+	struct mbox *vf_mbox;
+	struct otx2_nic *vf;
+	int offset, id;
+
+	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+	vf = vf_mbox->pfvf;
+	mbox = &vf_mbox->mbox_up;
+	mdev = &mbox->dev[0];
+
+	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+	if (vf_mbox->up_num_msgs == 0)
+		return;
+
+	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+		otx2vf_process_mbox_msg_up(vf, msg);
+		offset = mbox->rx_start + msg->next_msgoff;
+	}
+
+	otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+	struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
+	struct otx2_mbox_dev *mdev;
+	struct otx2_mbox *mbox;
+	struct mbox_hdr *hdr;
+
+	/* Read latest mbox data */
+	smp_rmb();
+
+	/* Check for PF => VF response messages */
+	mbox = &vf->mbox.mbox;
+	mdev = &mbox->dev[0];
+	otx2_sync_mbox_bbuf(mbox, 0);
+
+	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+	if (hdr->num_msgs) {
+		vf->mbox.num_msgs = hdr->num_msgs;
+		hdr->num_msgs = 0;
+		memset(mbox->hwbase + mbox->rx_start, 0,
+		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+	}
+	/* Check for PF => VF notification messages */
+	mbox = &vf->mbox.mbox_up;
+	mdev = &mbox->dev[0];
+	otx2_sync_mbox_bbuf(mbox, 0);
+
+	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+	if (hdr->num_msgs) {
+		vf->mbox.up_num_msgs = hdr->num_msgs;
+		hdr->num_msgs = 0;
+		memset(mbox->hwbase + mbox->rx_start, 0,
+		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+	}
+	/* Clear the IRQ */
+	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+
+	return IRQ_HANDLED;
+}
+
+static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
+{
+	int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);
+
+	/* Disable VF => PF mailbox IRQ */
+	otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+	free_irq(vector, vf);
+}
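The interrupt plumbing here relies on the convention, used consistently by this driver, that RVU interrupt CSRs come in write-1-to-set/write-1-to-clear pairs, so individual bits can be toggled without a read-modify-write. The three writes that appear in the surrounding code illustrate it:

	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));		/* ack the pending bit */
	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));	/* enable the interrupt */
	otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));	/* disable the interrupt */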
+
+static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
+{
+	struct otx2_hw *hw = &vf->hw;
+	struct msg_req *req;
+	char *irq_name;
+	int err;
+
+	/* Register mailbox interrupt handler */
+	irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
+	snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
+	err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+			  otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+	if (err) {
+		dev_err(vf->dev,
+			"RVUVF: IRQ registration failed for VFAF mbox irq\n");
+		return err;
+	}
+
+	/* Enable mailbox interrupt for msgs coming from PF.
+	 * First clear to avoid spurious interrupts, if any.
+	 */
+	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+
+	if (!probe_pf)
+		return 0;
+
+	/* Check mailbox communication with PF */
+	req = otx2_mbox_alloc_msg_ready(&vf->mbox);
+	if (!req) {
+		otx2vf_disable_mbox_intr(vf);
+		return -ENOMEM;
+	}
+
+	err = otx2_sync_mbox_msg(&vf->mbox);
+	if (err) {
+		dev_warn(vf->dev,
+			 "AF not responding to mailbox, deferring probe\n");
+		otx2vf_disable_mbox_intr(vf);
+		return -EPROBE_DEFER;
+	}
+	return 0;
+}
+
+static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
+{
+	struct mbox *mbox = &vf->mbox;
+
+	if (vf->mbox_wq) {
+		flush_workqueue(vf->mbox_wq);
+		destroy_workqueue(vf->mbox_wq);
+		vf->mbox_wq = NULL;
+	}
+
+	if (mbox->mbox.hwbase)
+		iounmap((void __iomem *)mbox->mbox.hwbase);
+
+	otx2_mbox_destroy(&mbox->mbox);
+	otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
+{
+	struct mbox *mbox = &vf->mbox;
+	void __iomem *hwbase;
+	int err;
+
+	mbox->pfvf = vf;
+	vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
+				      WQ_UNBOUND | WQ_HIGHPRI |
+				      WQ_MEM_RECLAIM, 1);
+	if (!vf->mbox_wq)
+		return -ENOMEM;
+
+	/* Mailbox is a reserved memory (in RAM) region shared between
+	 * admin function (i.e PF0) and this VF, shouldn't be mapped as
+	 * device memory, to allow unaligned accesses.
+	 */
+	hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
+			    pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
+	if (!hwbase) {
+		dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
+			     MBOX_DIR_VFPF, 1);
+	if (err)
+		goto exit;
+
+	err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
+			     MBOX_DIR_VFPF_UP, 1);
+	if (err)
+		goto exit;
+
+	err = otx2_mbox_bbuf_init(mbox, vf->pdev);
+	if (err)
+		goto exit;
+
+	INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
+	INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
+	otx2_mbox_lock_init(&vf->mbox);
+
+	return 0;
+exit:
+	destroy_workqueue(vf->mbox_wq);
+	return err;
+}
+
+int otx2vf_open(struct net_device *netdev)
+{
+	struct otx2_nic *vf;
+	int err;
+
+	err = otx2_open(netdev);
+	if (err)
+		return err;
+
+	/* LBKs do not receive link events, so tell everyone we are up here */
+	vf = netdev_priv(netdev);
+	if (is_otx2_lbkvf(vf->pdev)) {
+		pr_info("%s NIC Link is UP\n", netdev->name);
+		netif_carrier_on(netdev);
+		netif_tx_start_all_queues(netdev);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(otx2vf_open);
+
+int otx2vf_stop(struct net_device *netdev)
+{
+	return otx2_stop(netdev);
+}
+EXPORT_SYMBOL(otx2vf_stop);
+
+static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct otx2_nic *vf = netdev_priv(netdev);
+	int qidx = skb_get_queue_mapping(skb);
+	struct otx2_snd_queue *sq;
+	struct netdev_queue *txq;
+
+	/* Check for minimum and maximum packet length */
+	if (skb->len <= ETH_HLEN ||
+	    (!skb_shinfo(skb)->gso_size && skb->len > vf->max_frs)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	sq = &vf->qset.sq[qidx];
+
+	txq = netdev_get_tx_queue(netdev, qidx);
+	if (!netif_tx_queue_stopped(txq) &&
+	    !otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+		netif_tx_stop_queue(txq);
+
+		/* Barrier: make the stopped queue state visible to other CPUs */
+		smp_mb();
+		if ((sq->num_sqbs - *sq->aura_fc_addr) > 1)
+			netif_tx_start_queue(txq);
+		else
+			netdev_warn(netdev,
+				    "%s: No free SQE/SQB, stopping SQ%d\n",
+				    netdev->name, qidx);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	return NETDEV_TX_OK;
+}
+
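The stop/restart dance in otx2vf_xmit() follows the usual lockless TX-queue pattern. For illustration, a sketch of the completion-side counterpart that would pair with it; otx2vf_txq_wake() is a hypothetical helper (the actual wake-up lives in the common CQE-processing path), shown only to make the barrier pairing explicit:

static void otx2vf_txq_wake(struct netdev_queue *txq,
			    struct otx2_snd_queue *sq)
{
	/* Pairs with the smp_mb() in otx2vf_xmit(): the queue must be
	 * seen as stopped before the free-SQB count is re-checked,
	 * otherwise the wake-up could be lost.
	 */
	smp_mb();
	if (netif_tx_queue_stopped(txq) &&
	    (sq->num_sqbs - *sq->aura_fc_addr) > 1)
		netif_tx_wake_queue(txq);
}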
+static void otx2vf_reset_task(struct work_struct *work)
+{
+	struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);
+
+	if (!netif_running(vf->netdev))
+		return;
+
+	otx2vf_stop(vf->netdev);
+	otx2vf_open(vf->netdev);
+	netif_trans_update(vf->netdev);
+}
+
+static netdev_features_t
+otx2_features_check(struct sk_buff *skb, struct net_device *dev,
+		    netdev_features_t features)
+{
+	return features;
+}
+
+static const struct net_device_ops otx2vf_netdev_ops = {
+	.ndo_open = otx2vf_open,
+	.ndo_stop = otx2vf_stop,
+	.ndo_start_xmit = otx2vf_xmit,
+	.ndo_set_mac_address = otx2_set_mac_address,
+	.ndo_change_mtu = otx2vf_change_mtu,
+	.ndo_get_stats64 = otx2_get_stats64,
+	.ndo_tx_timeout = otx2_tx_timeout,
+	.ndo_features_check = otx2_features_check,
+};
+
+static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
+{
+	struct otx2_hw *hw = &vf->hw;
+	int num_vec, err;
+
+	num_vec = hw->nix_msixoff;
+	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+	otx2vf_disable_mbox_intr(vf);
+	pci_free_irq_vectors(hw->pdev);
+	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+	if (err < 0) {
+		dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+			__func__, num_vec);
+		return err;
+	}
+
+	err = otx2vf_register_mbox_intr(vf, false);
+	if (err)
+		return err;
+	return 0;
+}
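Schematically, the vector budget computed above is nix_msixoff (vectors used by blocks below the NIX LF) + NIX_LF_CINT_VEC_START (vectors within the NIX LF block that precede the completion interrupts) + max_queues (one CINT per Rx/Tx queue pair). Since nix_msixoff is only learned from the AF's MSIX_OFFSET mailbox response, probing (below) must first attach the LFs and only then re-allocate the MSI-X vectors; this is the sole reason the realloc helper exists.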
+
+static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int num_vec = pci_msix_vec_count(pdev);
+	struct device *dev = &pdev->dev;
+	struct net_device *netdev;
+	struct otx2_nic *vf;
+	struct otx2_hw *hw;
+	int err, qcount;
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		return err;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to set DMA mask\n");
+		goto err_release_regions;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to set consistent DMA mask\n");
+		goto err_release_regions;
+	}
+
+	pci_set_master(pdev);
+
+	qcount = num_online_cpus();
+	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	pci_set_drvdata(pdev, netdev);
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	vf = netdev_priv(netdev);
+	vf->netdev = netdev;
+	vf->pdev = pdev;
+	vf->dev = dev;
+	vf->iommu_domain = iommu_get_domain_for_dev(dev);
+	if (vf->iommu_domain)
+		vf->iommu_domain_type =
+			((struct iommu_domain *)vf->iommu_domain)->type;
+
+	vf->flags |= OTX2_FLAG_INTF_DOWN;
+	hw = &vf->hw;
+	hw->pdev = vf->pdev;
+	hw->rx_queues = qcount;
+	hw->tx_queues = qcount;
+	hw->max_queues = qcount;
+
+	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+					  GFP_KERNEL);
+	if (!hw->irq_name) {
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+
+	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+					 sizeof(cpumask_var_t), GFP_KERNEL);
+	if (!hw->affinity_mask) {
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+
+	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+	if (err < 0) {
+		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+			__func__, num_vec);
+		goto err_free_netdev;
+	}
+
+	vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+	if (!vf->reg_base) {
+		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+		err = -ENOMEM;
+		goto err_free_irq_vectors;
+	}
+
+	/* Init VF <=> PF mailbox stuff */
+	err = otx2vf_vfaf_mbox_init(vf);
+	if (err)
+		goto err_free_irq_vectors;
+
+	/* Register mailbox interrupt */
+	err = otx2vf_register_mbox_intr(vf, true);
+	if (err)
+		goto err_mbox_destroy;
+
+	/* Request AF to attach NPA and NIX LFs to this VF */
+	err = otx2_attach_npa_nix(vf);
+	if (err)
+		goto err_disable_mbox_intr;
+
+	err = otx2vf_realloc_msix_vectors(vf);
+	if (err)
+		goto err_mbox_destroy;
+
+	err = otx2_set_real_num_queues(netdev, qcount, qcount);
+	if (err)
+		goto err_detach_rsrc;
+
+	otx2_setup_dev_hw_settings(vf);
+
+	err = otx2smqvf_probe(vf);
+	if (!err)
+		return 0;
+	else if (err == -EINVAL)
+		goto err_detach_rsrc;
+
+	/* Assign default mac address */
+	otx2_get_mac_from_af(netdev);
+
+	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+			      NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+			      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+	netdev->features = netdev->hw_features;
+	/* Support TSO on tag interface */
+	netdev->vlan_features |= netdev->features;
+	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_HW_VLAN_STAG_TX;
+	netdev->features |= netdev->hw_features;
+	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+	netdev->netdev_ops = &otx2vf_netdev_ops;
+
+	/* MTU range: 68 - 9190 */
+	netdev->min_mtu = OTX2_MIN_MTU;
+	netdev->max_mtu = OTX2_MAX_MTU;
+
+	INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+
+	if (is_otx2_lbkvf(vf->pdev)) {
+		int n;
+
+		n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
+		/* Need to subtract 1 to get proper VF number */
+		n -= 1;
+		snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
+	}
+
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(dev, "Failed to register netdevice\n");
+		goto err_detach_rsrc;
+	}
+
+	otx2vf_set_ethtool_ops(netdev);
+
+	return 0;
+
+err_detach_rsrc:
+	otx2_detach_resources(&vf->mbox);
+err_disable_mbox_intr:
+	otx2vf_disable_mbox_intr(vf);
+err_mbox_destroy:
+	otx2vf_vfaf_mbox_destroy(vf);
+err_free_irq_vectors:
+	pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+err_release_regions:
+	pci_release_regions(pdev);
+	return err;
+}
+
+static void otx2vf_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct otx2_nic *vf;
+
+	if (!netdev)
+		return;
+
+	vf = netdev_priv(netdev);
+
+	if (otx2smqvf_remove(vf))
+		unregister_netdev(netdev);
+
+	otx2vf_disable_mbox_intr(vf);
+
+	otx2_detach_resources(&vf->mbox);
+	otx2vf_vfaf_mbox_destroy(vf);
+	pci_free_irq_vectors(vf->pdev);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+
+	pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2vf_driver = {
+	.name = DRV_NAME,
+	.id_table = otx2_vf_id_table,
+	.probe = otx2vf_probe,
+	.remove = otx2vf_remove,
+	.shutdown = otx2vf_remove,
+};
+
+static int __init otx2vf_init_module(void)
+{
+	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+	return pci_register_driver(&otx2vf_driver);
+}
+
+static void __exit otx2vf_cleanup_module(void)
+{
+	pci_unregister_driver(&otx2vf_driver);
+}
+
+module_init(otx2vf_init_module);
+module_exit(otx2vf_cleanup_module);
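A worked example of the LBK interface naming in otx2vf_probe() (the pcifunc value is made up): for a VF whose AF-assigned pcifunc is 0x0403, (0x0403 >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK extracts the FUNC field 3; since FUNC 0 is the PF itself, subtracting 1 gives VF number 2 and the netdev is registered as "lbk2".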