Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c  2226
1 file changed, 2040 insertions(+), 186 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 5205796859f6..d71c3e51373a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2019 Marvell International Ltd.
+ * Copyright (C) 2019 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifdef CONFIG_DEBUG_FS
@@ -19,6 +16,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "npc.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -97,7 +95,7 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT5] = "Total frames sent on the interface",
[CGX_STAT6] = "Packets sent with an octet count < 64",
[CGX_STAT7] = "Packets sent with an octet count == 64",
- [CGX_STAT8] = "Packets sent with an octet count of 65–127",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
[CGX_STAT9] = "Packets sent with an octet count of 128-255",
[CGX_STAT10] = "Packets sent with an octet count of 256-511",
[CGX_STAT11] = "Packets sent with an octet count of 512-1023",
@@ -109,6 +107,89 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT17] = "Control/PAUSE packets sent",
};
+static char *rpm_rx_stats_fields[] = {
+ "Octets of received packets",
+ "Octets of received packets with out error",
+ "Received packets with alignment errors",
+ "Control/PAUSE packets received",
+ "Packets received with Frame too long Errors",
+ "Packets received with a1nrange length Errors",
+ "Received packets",
+ "Packets received with FrameCheckSequenceErrors",
+ "Packets received with VLAN header",
+ "Error packets",
+ "Packets received with unicast DMAC",
+ "Packets received with multicast DMAC",
+ "Packets received with broadcast DMAC",
+ "Dropped packets",
+ "Total frames received on interface",
+ "Packets received with an octet count < 64",
+ "Packets received with an octet count == 64",
+ "Packets received with an octet count of 65-127",
+ "Packets received with an octet count of 128-255",
+ "Packets received with an octet count of 256-511",
+ "Packets received with an octet count of 512-1023",
+ "Packets received with an octet count of 1024-1518",
+ "Packets received with an octet count of > 1518",
+ "Oversized Packets",
+ "Jabber Packets",
+ "Fragmented Packets",
+ "CBFC(class based flow control) pause frames received for class 0",
+ "CBFC pause frames received for class 1",
+ "CBFC pause frames received for class 2",
+ "CBFC pause frames received for class 3",
+ "CBFC pause frames received for class 4",
+ "CBFC pause frames received for class 5",
+ "CBFC pause frames received for class 6",
+ "CBFC pause frames received for class 7",
+ "CBFC pause frames received for class 8",
+ "CBFC pause frames received for class 9",
+ "CBFC pause frames received for class 10",
+ "CBFC pause frames received for class 11",
+ "CBFC pause frames received for class 12",
+ "CBFC pause frames received for class 13",
+ "CBFC pause frames received for class 14",
+ "CBFC pause frames received for class 15",
+ "MAC control packets received",
+};
+
+static char *rpm_tx_stats_fields[] = {
+ "Total octets sent on the interface",
+ "Total octets transmitted OK",
+ "Control/Pause frames sent",
+ "Total frames transmitted OK",
+ "Total frames sent with VLAN header",
+ "Error Packets",
+ "Packets sent to unicast DMAC",
+ "Packets sent to the multicast DMAC",
+ "Packets sent to a broadcast DMAC",
+ "Packets sent with an octet count == 64",
+ "Packets sent with an octet count of 65-127",
+ "Packets sent with an octet count of 128-255",
+ "Packets sent with an octet count of 256-511",
+ "Packets sent with an octet count of 512-1023",
+ "Packets sent with an octet count of 1024-1518",
+ "Packets sent with an octet count of > 1518",
+ "CBFC(class based flow control) pause frames transmitted for class 0",
+ "CBFC pause frames transmitted for class 1",
+ "CBFC pause frames transmitted for class 2",
+ "CBFC pause frames transmitted for class 3",
+ "CBFC pause frames transmitted for class 4",
+ "CBFC pause frames transmitted for class 5",
+ "CBFC pause frames transmitted for class 6",
+ "CBFC pause frames transmitted for class 7",
+ "CBFC pause frames transmitted for class 8",
+ "CBFC pause frames transmitted for class 9",
+ "CBFC pause frames transmitted for class 10",
+ "CBFC pause frames transmitted for class 11",
+ "CBFC pause frames transmitted for class 12",
+ "CBFC pause frames transmitted for class 13",
+ "CBFC pause frames transmitted for class 14",
+ "CBFC pause frames transmitted for class 15",
+ "MAC control packets sent",
+ "Total frames sent on the interface"
+};
+
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
blk_addr, NDC_AF_CONST) & 0xFF)
@@ -139,6 +220,96 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+#define LMT_MAPTBL_ENTRY_SIZE 16
+/* Dump LMTST map table */
+static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rvu *rvu = filp->private_data;
+ u64 lmt_addr, val, tbl_base;
+ int pf, vf, num_vfs, hw_vfs;
+ void __iomem *lmt_map_base;
+ int index = 0, off = 0;
+ int bytes_not_copied;
+ int buf_size = 10240;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOSPC;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ kfree(buf);
+ return false;
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\tLmtst Map Table Entries");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\t=======================");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmtline Base (word 0)\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmt Map Entry (word 1)");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
+ pf);
+
+ index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
+ val);
+ /* Reading num of VFs per PF */
+ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ for (vf = 0; vf < num_vfs; vf++) {
+ index = (pf * rvu->hw->total_vfs * 16) +
+ ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "PF%d:VF%d \t\t", pf, vf);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%llx\t\t", (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\n", val);
+ }
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ iounmap(lmt_map_base);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
+
static void get_lf_str_list(struct rvu_block block, int pcifunc,
char *lfs)
{
@@ -237,6 +408,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
kfree(buf);
return -ENOMEM;
}
+
off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
"pcifunc");
for (index = 0; index < BLK_COUNT; index++)
@@ -312,18 +484,59 @@ out:
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
-static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
+static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct pci_dev *pdev = NULL;
+ struct mac_ops *mac_ops;
+ char cgx[10], lmac[10];
+ struct rvu_pfvf *pfvf;
+ int pf, domain, blkid;
+ u8 cgx_id, lmac_id;
+ u16 pcifunc;
+
+ domain = 2;
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ /* There can be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ mac_ops->name);
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ continue;
+
+ cgx[0] = 0;
+ lmac[0] = 0;
+ pcifunc = pf << 10;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (pfvf->nix_blkaddr == BLKADDR_NIX0)
+ blkid = 0;
+ else
+ blkid = 1;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
+ &lmac_id);
+ sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
+ sprintf(lmac, "LMAC%d", lmac_id);
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
struct rvu_block *block;
struct rvu_hwinfo *hw;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
- if (blkaddr < 0) {
- dev_warn(rvu->dev, "Invalid blktype\n");
- return false;
- }
hw = rvu->hw;
block = &hw->block[blkaddr];
@@ -379,10 +592,12 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
{
void (*print_qsize)(struct seq_file *filp,
struct rvu_pfvf *pfvf) = NULL;
+ struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
+ int blkaddr;
rvu = filp->private;
switch (blktype) {
@@ -400,7 +615,15 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -417,6 +640,8 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
struct seq_file *seqfile = filp->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
+ struct dentry *current_dir;
+ int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -438,12 +663,20 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
if (cmd_buf)
ret = -EINVAL;
- if (!strncmp(subtoken, "help", 4) || ret < 0) {
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
goto qsize_write_done;
}
@@ -476,6 +709,7 @@ RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_aura_s *aura = &rsp->aura;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
@@ -495,6 +729,9 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
(u64)aura->limit, aura->bp, aura->fc_ena);
+
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
aura->fc_up_crossing, aura->fc_stype);
seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
@@ -512,12 +749,15 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_pool_s *pool = &rsp->pool;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
@@ -539,6 +779,8 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
pool->avg_con, pool->fc_ena, pool->fc_stype);
seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
pool->fc_hyst_bits, pool->fc_up_crossing);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
@@ -552,8 +794,10 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
pool->thresh_int_ena, pool->thresh_up);
- seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
pool->thresh_qint_idx, pool->err_qint_idx);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
/* Reads aura/pool's ctx from admin queue */
@@ -586,7 +830,7 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -644,7 +888,7 @@ static int write_npa_ctx(struct rvu *rvu, bool all,
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -792,9 +1036,17 @@ static void ndc_cache_stats(struct seq_file *s, int blk_addr,
int ctype, int transaction)
{
u64 req, out_req, lat, cant_alloc;
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int port;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
for (port = 0; port < NDC_MAX_PORT; port++) {
req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
(port, ctype, transaction));
@@ -837,9 +1089,17 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int bank, max_bank;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
max_bank = NDC_MAX_BANK(rvu, blk_addr);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
@@ -855,16 +1115,30 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_RX,
- BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_TX,
- BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
@@ -880,8 +1154,14 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
@@ -889,16 +1169,92 @@ RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+static void print_nix_cn10k_sq_ctx(struct seq_file *m,
+ struct nix_cn10k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
+ return;
+ }
seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
sq_ctx->sqe_way_mask, sq_ctx->cq);
@@ -959,10 +1315,94 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
(u64)sq_ctx->dropped_pkts);
}
+static void print_nix_cn10k_rq_ctx(struct seq_file *m,
+ struct nix_cn10k_rq_ctx_s *rq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->lenerr_dis);
+ seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
+ rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
+ seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
+ rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
+ seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
+ rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
+
+ seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->spb_aura, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->sso_tt);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
+ seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
+ rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
+
+ seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
+ seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
+ rq_ctx->wqe_skip, rq_ctx->spb_ena);
+ seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
+ rq_ctx->lpb_sizem1, rq_ctx->first_skip);
+ seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
+ rq_ctx->later_skip, rq_ctx->xqe_imm_size);
+ seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
+ rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
+
+ seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
+ rq_ctx->xqe_drop, rq_ctx->xqe_pass);
+ seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
+ rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
+ seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
+ rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
+ seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
+ rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
+
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
+
+ seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
+ rq_ctx->ltag, rq_ctx->good_utag);
+ seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
+ rq_ctx->bad_utag, rq_ctx->flow_tagw);
+ seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
+ rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
+ seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
+ rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
+ seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
/* Dumps given nix_rq's context */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
+ return;
+ }
seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
rq_ctx->wqe_aura, rq_ctx->substream);
@@ -1057,7 +1497,8 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
{
void (*print_nix_ctx)(struct seq_file *filp,
struct nix_aq_enq_rsp *rsp) = NULL;
- struct rvu *rvu = filp->private;
+ struct nix_hw *nix_hw = filp->private;
+ struct rvu *rvu = nix_hw->rvu;
struct nix_aq_enq_req aq_req;
struct nix_aq_enq_rsp rsp;
char *ctype_string = NULL;
@@ -1089,7 +1530,7 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1141,13 +1582,15 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
}
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
- int id, int ctype, char *ctype_string)
+ int id, int ctype, char *ctype_string,
+ struct seq_file *m)
{
+ struct nix_hw *nix_hw = m->private;
struct rvu_pfvf *pfvf;
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1207,7 +1650,8 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
int ctype)
{
struct seq_file *m = filp->private_data;
- struct rvu *rvu = m->private;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
char *cmd_buf, *ctype_string;
int nixlf, id = 0, ret;
bool all = false;
@@ -1243,7 +1687,7 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
goto done;
} else {
ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
- ctype_string);
+ ctype_string, m);
}
done:
kfree(cmd_buf);
@@ -1347,102 +1791,242 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
-static void rvu_dbg_nix_init(struct rvu *rvu)
+static ssize_t rvu_dbg_nix_tx_stall_hwissue_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ return rvu_nix_get_tx_stall_counters(filp->private_data, buffer, ppos);
+}
- rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.nix) {
- dev_err(rvu->dev, "create debugfs dir failed for nix\n");
- return;
- }
+RVU_DEBUG_FOPS(nix_tx_stall_hwissue, nix_tx_stall_hwissue_display, NULL);
- pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_sq_ctx_fops);
- if (!pfile)
- goto create_failed;
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
- pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_rq_ctx_fops);
- if (!pfile)
- goto create_failed;
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
- pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_cq_ctx_fops);
- if (!pfile)
- goto create_failed;
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
- pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_tx_cache_fops);
- if (!pfile)
- goto create_failed;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
- pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_rx_cache_fops);
- if (!pfile)
- goto create_failed;
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
- pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
- if (!pfile)
- goto create_failed;
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
- pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
- if (!pfile)
- goto create_failed;
+ ipolicer = &nix_hw->ipolicer[layer];
- pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
- if (!pfile)
- goto create_failed;
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
- return;
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
- debugfs_remove_recursive(rvu->rvu_dbg.nix);
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
}
-static void rvu_dbg_npa_init(struct rvu *rvu)
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
+
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
- rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.npa)
- return;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
- pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_qsize_fops);
- if (!pfile)
- goto create_failed;
+ return 0;
+}
- pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_aura_ctx_fops);
- if (!pfile)
- goto create_failed;
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
- pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_pool_ctx_fops);
- if (!pfile)
- goto create_failed;
+static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
+{
+ struct nix_hw *nix_hw;
- pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_ndc_cache_fops);
- if (!pfile)
- goto create_failed;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
- pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
- rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
- if (!pfile)
- goto create_failed;
+ if (blkaddr == BLKADDR_NIX0) {
+ nix_hw = &rvu->hw->nix[0];
+ } else {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
+ rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.nix) {
+ dev_err(rvu->dev,
+ "create debugfs dir failed for nix1\n");
+ return;
+ }
+ nix_hw = &rvu->hw->nix[1];
+ }
- return;
+ debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_sq_ctx_fops);
+ debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_rq_ctx_fops);
+ debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_cq_ctx_fops);
+ debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
+ if (is_rvu_96xx_A0(rvu)) {
+ debugfs_create_file("tx_stall_hwissue", 0600,
+ rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tx_stall_hwissue_fops);
+ }
+}
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
- debugfs_remove_recursive(rvu->rvu_dbg.npa);
+static void rvu_dbg_npa_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_hits_miss_fops);
}
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
@@ -1468,6 +2052,7 @@ create_failed:
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
struct cgx_link_user_info linfo;
+ struct mac_ops *mac_ops;
void *cgxd = s->private;
u64 ucast, mcast, bcast;
int stat = 0, err = 0;
@@ -1479,6 +2064,11 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
if (!rvu)
return -ENODEV;
+ mac_ops = get_mac_ops(cgxd);
+ /* There can be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+
/* Link status */
seq_puts(s, "\n=======Link Status======\n\n");
err = cgx_get_link_info(cgxd, lmac_id, &linfo);
@@ -1488,7 +2078,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* Rx stats */
- seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
if (err)
return err;
@@ -1510,7 +2101,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Tx stats */
- seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
if (err)
return err;
@@ -1529,33 +2121,43 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Rx stats */
- seq_puts(s, "\n=======CGX RX_STATS======\n\n");
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
+ rx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
+ rx_stat);
stat++;
}
/* Tx stats */
stat = 0;
- seq_puts(s, "\n=======CGX TX_STATS======\n\n");
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
+
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
stat++;
}
return err;
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
- int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@@ -1563,53 +2165,127 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
- err = kstrtoint(buf + 1, 10, &lmac_id);
- if (!err) {
- err = cgx_print_stats(filp, lmac_id);
- if (err)
- return err;
- }
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0 ; index < 32 ; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap;
int i, lmac_id;
char dname[20];
void *cgx;
- rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
+ if (!cgx_get_cgxcnt_max())
+ return;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ if (!mac_ops)
+ return;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
+ rvu->rvu_dbg.root);
for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
cgx = rvu_cgx_pdata(i, rvu);
if (!cgx)
continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgx);
/* cgx debugfs dir */
- sprintf(dname, "cgx%d", i);
+ sprintf(dname, "%s%d", mac_ops->name, i);
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
- for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
+ for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- pfile = debugfs_create_file("stats", 0600,
- rvu->rvu_dbg.lmac, cgx,
- &rvu_dbg_cgx_stat_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
}
}
- return;
-
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
- debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
}
/* NPC debugfs APIs */
@@ -1620,9 +2296,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
int entry_acnt, entry_ecnt;
int cntr_acnt, cntr_ecnt;
- /* Skip PF0 */
- if (!pcifunc)
- return;
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
&entry_acnt, &entry_ecnt);
rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
@@ -1653,7 +2326,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
struct rvu *rvu = filp->private;
int pf, vf, numvfs, blkaddr;
struct npc_mcam *mcam;
- u16 pcifunc;
+ u16 pcifunc, counters;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1661,6 +2334,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
return -ENODEV;
mcam = &rvu->hw->mcam;
+ counters = rvu->hw->npc_counters;
seq_puts(filp, "\nNPC MCAM info:\n");
/* MCAM keywidth on receive and transmit sides */
@@ -1683,10 +2357,9 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
/* MCAM counters */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- cfg = (cfg >> 48) & 0xFFFF;
- seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
- seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
+ seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ counters - mcam->counters.max);
seq_printf(filp, "\t\t Available \t: %d\n",
rvu_rsrc_free_count(&mcam->counters));
@@ -1713,6 +2386,989 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
return 0;
}
+static int parse_sso_cmd_buffer(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *ssolf,
+ bool *all)
+{
+ int ret, bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+ } else{
+ ret = subtoken ? kstrtoint(subtoken, 10, ssolf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sso_hwgrp_display_iq_list(struct rvu *rvu, int ssolf, u16 idx,
+ u16 tail_idx, u8 queue_type)
+{
+ const char *queue[3] = {"DQ", "CQ", "AQ"};
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("SSO HWGGRP[%d] [%s] Chain queue head[%d]", ssolf,
+ queue[queue_type], idx);
+ pr_info("SSO HWGGRP[%d] [%s] Chain queue tail[%d]", ssolf,
+ queue[queue_type], tail_idx);
+ pr_info("--------------------------------------------------\n");
+ do {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_TAG(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] TAG 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] GRP 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_PENDTAG(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] PENDTAG 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_LINKS(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] LINKS 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_QLINKS(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] QLINKS 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+ pr_info("--------------------------------------------------\n");
+ if (idx == tail_idx)
+ break;
+ idx = reg & 0x1FFF;
+ } while (idx != 0x1FFF);
+}
+
+static void sso_hwgrp_display_taq_list(struct rvu *rvu, int ssolf, u8 wae_head,
+ u16 ent_head, u8 wae_used, u8 taq_lines)
+{
+ int i, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("--------------------------------------------------\n");
+ do {
+ for (i = wae_head; i < taq_lines && wae_used; i++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_WAEY_TAG(ent_head, i));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] WAE[%d] TAG 0x%llx\n",
+ ssolf, ent_head, i, reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_WAEY_WQP(ent_head, i));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] WAE[%d] WQP 0x%llx\n",
+ ssolf, ent_head, i, reg);
+ wae_used--;
+ }
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_LINK(ent_head));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] LINK 0x%llx\n",
+ ssolf, ent_head, reg);
+ ent_head = reg & 0x7FF;
+ pr_info("--------------------------------------------------\n");
+ } while (ent_head && wae_used);
+}
+
+static int read_sso_pc(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES0);
+ pr_info("SSO Add-Work active cycles %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES1);
+ pr_info("SSO Get-Work active cycles %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES2);
+ pr_info("SSO Work-Slot active cycles %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_NOS_CNT) & 0x1FFF;
+ pr_info("SSO work-queue entries on the no-schedule list %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_READ_ARB);
+ pr_info("SSO XAQ reads outstanding %lld\n",
+ (reg >> 24) & 0x3F);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQ_REQ_PC);
+ pr_info("SSO XAQ reads requests %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQ_LATENCY_PC);
+ pr_info("SSO XAQ read latency cycles %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ pr_info("SSO IAQ reserved %lld\n",
+ (reg >> 16) & 0x3FFF);
+ pr_info("SSO IAQ total %lld\n", reg & 0x3FFF);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ pr_info("SSO TAQ reserved %lld\n",
+ (reg >> 16) & 0x7FF);
+ pr_info("SSO TAQ total %lld\n", reg & 0x7FF);
+ pr_info("\n");
+
+ return 0;
+}
+
+/* Reads SSO hwgrp perfomance counters */
+static void read_sso_hwgrp_pc(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Schedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] External Schedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Add PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Tag Switch PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Deschedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DQ_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Descheduled PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_PAGE_CNT(ssolf));
+ pr_info("SSO HWGGRP[%d] In-use Page Count 0x%llx\n", ssolf,
+ reg);
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp Threshold */
+static void read_sso_hwgrp_thresh(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_IAQ_THR(ssolf));
+ pr_info("SSO HWGGRP[%d] IAQ Threshold 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_TAQ_THR(ssolf));
+ pr_info("SSO HWGGRP[%d] TAQ Threshold 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_XAQ_AURA(ssolf));
+ pr_info("SSO HWGGRP[%d] XAQ Aura 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_XAQ_LIMIT(ssolf));
+ pr_info("SSO HWGGRP[%d] XAQ Limit 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_IU_ACCNT(ssolf));
+ pr_info("SSO HWGGRP[%d] IU Account Index 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_IU_ACCNTX_CFG(reg & 0xFF));
+ pr_info("SSO HWGGRP[%d] IU Accounting Cfg 0x%llx\n", ssolf,
+ reg);
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp TAQ list */
+static void read_sso_hwgrp_taq_list(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 taq_entries, wae_head;
+ struct rvu_block *block;
+ u16 ent_head, cl_used;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST);
+ taq_entries = (reg >> 48) & 0xFF;
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Transitory Output Admission Queue",
+ ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TOAQX_STATUS(ssolf));
+ pr_info("SSO HWGGRP[%d] TOAQ Status 0x%llx\n", ssolf,
+ reg);
+ ent_head = (reg >> 12) & 0x7FF;
+ cl_used = (reg >> 32) & 0x7FF;
+ if (reg & BIT_ULL(61) && cl_used) {
+ pr_info("SSO HWGGRP[%d] TOAQ CL_USED 0x%x\n",
+ ssolf, cl_used);
+ sso_hwgrp_display_taq_list(rvu, ssolf, ent_head, 0,
+ cl_used * taq_entries,
+ taq_entries);
+ }
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Transitory Input Admission Queue",
+ ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TIAQX_STATUS(ssolf));
+ pr_info("SSO HWGGRP[%d] TIAQ Status 0x%llx\n", ssolf,
+ reg);
+ wae_head = (reg >> 60) & 0xF;
+ cl_used = (reg >> 32) & 0x7FFF;
+ ent_head = (reg >> 12) & 0x7FF;
+ if (reg & BIT_ULL(61) && cl_used) {
+ pr_info("SSO HWGGRP[%d] TIAQ WAE_USED 0x%x\n",
+ ssolf, cl_used);
+ sso_hwgrp_display_taq_list(rvu, ssolf, ent_head,
+ wae_head, cl_used,
+ taq_entries);
+ }
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp IAQ list */
+static void read_sso_hwgrp_iaq_list(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ u16 head_idx, tail_idx;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
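+	/* Each IPL list register below is decoded as [25:13] = head index,
+	 * [12:0] = tail index; bits 26-27 indicate a non-empty list.
+	 */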
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+		pr_info("SSO HWGRP[%d] Deschedule Queue (DQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_DESCHEDX(ssolf));
+		pr_info("SSO HWGRP[%d] DQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 0);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+		pr_info("SSO HWGRP[%d] Conflict Queue (CQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_CONFX(ssolf));
+		pr_info("SSO HWGRP[%d] CQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 1);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+		pr_info("SSO HWGRP[%d] Admission Queue (AQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_IAQX(ssolf));
+		pr_info("SSO HWGRP[%d] AQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 2);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp IENT list */
+static int read_sso_hwgrp_ient_list(struct rvu *rvu)
+{
+ const char *tt_c[4] = {"SSO_TT_ORDERED_", "SSO_TT_ATOMIC__",
+ "SSO_TT_UNTAGGED", "SSO_TT_EMPTY___"};
+ struct rvu_hwinfo *hw = rvu->hw;
+ int max_idx = hw->sso.sso_iue;
+ u64 pendtag, qlinks, links;
+ int len, idx, blkaddr;
+ u64 tag, grp, wqp;
+ char str[300];
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
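+	/* One line is printed per in-unit entry: tag type, owning HWGRP,
+	 * tag/group values, pend tag, links and the WQP pointer.
+	 */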
+ for (idx = 0; idx < max_idx; idx++) {
+ len = 0;
+ tag = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_TAG(idx));
+ grp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(idx));
+ pendtag = rvu_read64(rvu, blkaddr,
+ SSO_AF_IENTX_PENDTAG(idx));
+ links = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_LINKS(idx));
+ qlinks = rvu_read64(rvu, blkaddr,
+ SSO_AF_IENTX_QLINKS(idx));
+ wqp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_WQP(idx));
+ len = snprintf(str + len, 300,
+ "SSO IENT[%4d] TT [%s] HWGRP [%3lld] ", idx,
+ tt_c[(tag >> 32) & 0x3], (grp >> 48) & 0x1f);
+ len += snprintf(str + len, 300 - len,
+ "TAG [0x%010llx] GRP [0x%016llx] ", tag, grp);
+ len += snprintf(str + len, 300 - len, "PENDTAG [0x%010llx] ",
+ pendtag);
+ len += snprintf(str + len, 300 - len,
+ "LINKS [0x%016llx] QLINKS [0x%010llx] ", links,
+ qlinks);
+ snprintf(str + len, 300 - len, "WQP [0x%016llx]\n", wqp);
+ pr_info("%s", str);
+ }
+
+ return 0;
+}
+
+/* Reads SSO hwgrp free list */
+static int read_sso_hwgrp_free_list(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg;
+ u8 idx;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ pr_info("==================================================\n");
+ for (idx = 0; idx < 4; idx++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_FREEX(idx));
+ pr_info("SSO FREE LIST[%d]\n", idx);
+ pr_info("qnum_head : %lld qnum_tail : %lld\n",
+ (reg >> 58) & 0x3, (reg >> 56) & 0x3);
+ pr_info("queue_cnt : %llx\n", (reg >> 26) & 0x7fff);
+ pr_info("queue_val : %lld queue_head : %4lld queue_tail %4lld\n"
+ , (reg >> 40) & 0x1, (reg >> 13) & 0x1fff,
+ reg & 0x1fff);
+ pr_info("==================================================\n");
+ }
+
+ return 0;
+}
+
+/* Reads SSO HWS (work slot) arbitration and group-mask state */
+static void read_sso_hws_info(struct rvu *rvu, int ssowlf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int max_id;
+ u64 reg;
+ u8 mask;
+ u8 set;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssowlf < 0 || ssowlf >= block->lf.max) {
+ pr_info("Invalid SSOWLF(HWS), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssowlf = 0;
+ else
+ max_id = ssowlf + 1;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("==================================================\n");
+ for (; ssowlf < max_id; ssowlf++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_ARB(ssowlf));
+ pr_info("SSOW HWS[%d] Arbitration State 0x%llx\n", ssowlf,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_GMCTL(ssowlf));
+ pr_info("SSOW HWS[%d] Guest Machine Control 0x%llx\n", ssowlf,
+ reg);
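+		/* Two mask sets of four 64-bit group-mask registers per HWS */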
+ for (set = 0; set < 2; set++)
+ for (mask = 0; mask < 4; mask++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(ssowlf,
+ set,
+ mask));
+ pr_info(
+ "SSOW HWS[%d] SET[%d] Group Mask[%d] 0x%llx\n",
+ ssowlf, set, mask, reg);
+ }
+ pr_info("==================================================\n");
+ }
+}
+
+typedef void (*sso_dump_cb)(struct rvu *rvu, int ssolf, bool all);
+
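+/* Common "echo [<lf>|all] > <file>" write handler shared by the SSO
+ * debugfs files below; it parses the LF index (or "all") and invokes the
+ * per-file dump callback.
+ */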
+static ssize_t rvu_dbg_sso_cmd_parser(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, char *lf_type,
+ char *file_nm, sso_dump_cb fn)
+{
+ struct rvu *rvu = filp->private_data;
+ bool all = false;
+ char *cmd_buf;
+ int lf = 0;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+		return -ENOMEM;
+
+ if (parse_sso_cmd_buffer(cmd_buf, &count, buffer,
+ &lf, &all) < 0) {
+ pr_info("Usage: echo [<%s>/all] > %s\n", lf_type, file_nm);
+ } else {
+ fn(rvu, lf, all);
+ }
+ kfree(cmd_buf);
+
+ return count;
+}
+
+/* SSO debugfs APIs */
+static ssize_t rvu_dbg_sso_pc_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_pc(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_pc_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_pc", read_sso_hwgrp_pc);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_thresh_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_thresh", read_sso_hwgrp_thresh);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_taq_wlk_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+			"sso_hwgrp_taq_walk", read_sso_hwgrp_taq_list);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_iaq_wlk_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+			"sso_hwgrp_iaq_walk", read_sso_hwgrp_iaq_list);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_ient_wlk_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_hwgrp_ient_list(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_fl_wlk_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_hwgrp_free_list(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hws_info_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hws",
+ "sso_hws_info", read_sso_hws_info);
+}
+
+RVU_DEBUG_FOPS(sso_pc, sso_pc_display, NULL);
+RVU_DEBUG_FOPS(sso_hwgrp_pc, NULL, sso_hwgrp_pc_display);
+RVU_DEBUG_FOPS(sso_hwgrp_thresh, NULL, sso_hwgrp_thresh_display);
+RVU_DEBUG_FOPS(sso_hwgrp_taq_wlk, NULL, sso_hwgrp_taq_wlk_display);
+RVU_DEBUG_FOPS(sso_hwgrp_iaq_wlk, NULL, sso_hwgrp_iaq_wlk_display);
+RVU_DEBUG_FOPS(sso_hwgrp_ient_wlk, sso_hwgrp_ient_wlk_display, NULL);
+RVU_DEBUG_FOPS(sso_hwgrp_fl_wlk, sso_hwgrp_fl_wlk_display, NULL);
+RVU_DEBUG_FOPS(sso_hws_info, NULL, sso_hws_info_display);
+
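+/* Creates <debugfs>/<root>/sso/{hwgrp,hws}/...; with debugfs mounted at
+ * /sys/kernel/debug and an OcteonTx2 root directory these files would be
+ * used, for example, as:
+ *   echo all > /sys/kernel/debug/octeontx2/sso/hwgrp/sso_hwgrp_thresh
+ *   echo 0 > /sys/kernel/debug/octeontx2/sso/hws/sso_hws_info
+ * (the mount point and root directory name depend on the system/silicon).
+ */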
+static void rvu_dbg_sso_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.sso = debugfs_create_dir("sso", rvu->rvu_dbg.root);
+ rvu->rvu_dbg.sso_hwgrp = debugfs_create_dir("hwgrp", rvu->rvu_dbg.sso);
+ rvu->rvu_dbg.sso_hws = debugfs_create_dir("hws", rvu->rvu_dbg.sso);
+
+ debugfs_create_file("sso_pc", 0600, rvu->rvu_dbg.sso, rvu,
+ &rvu_dbg_sso_pc_fops);
+
+ debugfs_create_file("sso_hwgrp_pc", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_pc_fops);
+
+ debugfs_create_file("sso_hwgrp_thresh", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_thresh_fops);
+
+ debugfs_create_file("sso_hwgrp_taq_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_taq_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_iaq_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_iaq_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_ient_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_ient_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_free_list_walk", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_fl_wlk_fops);
+
+ debugfs_create_file("sso_hws_info", 0600, rvu->rvu_dbg.sso_hws,
+ rvu, &rvu_dbg_sso_hws_info_fops);
+}
+
+/* CPT debugfs APIs */
+static int parse_cpt_cmd_buffer(char *cmd_buf, size_t *count,
+ const char __user *buffer, char *e_type)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken)
+ strcpy(e_type, subtoken);
+ else
+ return -EINVAL;
+
+ if (cmd_buf)
+ return -EINVAL;
+
+ if (strcmp(e_type, "SE") && strcmp(e_type, "IE") &&
+ strcmp(e_type, "AE") && strcmp(e_type, "all"))
+ return -EINVAL;
+
+ return 0;
+}
+
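+/* Stores the engine type written by the user ("SE", "IE", "AE" or "all")
+ * for the subsequent read of the same file, e.g. "echo SE > cpt_engines_sts".
+ */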
+static ssize_t rvu_dbg_cpt_cmd_parser(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct rvu *rvu = s->private;
+ char *cmd_buf;
+ int ret = 0;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+		return -ENOMEM;
+
+ if (parse_cpt_cmd_buffer(cmd_buf, &count, buffer,
+ rvu->rvu_dbg.cpt_ctx.e_type) < 0)
+ ret = -EINVAL;
+
+ kfree(cmd_buf);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t rvu_dbg_cpt_engines_sts_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_cpt_cmd_parser(filp, buffer, count, ppos);
+}
+
+static int rvu_dbg_cpt_engines_sts_display(struct seq_file *filp, void *unused)
+{
+ u64 busy_sts[2] = {0}, free_sts[2] = {0};
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0, e;
+ struct dentry *current_dir;
+ int blkaddr;
+ char *e_type;
+ u64 reg;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
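+	/* CPT_AF_CONSTANTS1 decode below: [15:0] SE count, [31:16] IE count,
+	 * [47:32] AE count.
+	 */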
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_type = rvu->rvu_dbg.cpt_ctx.e_type;
+
+ if (strcmp(e_type, "SE") == 0) {
+ e_min = 0;
+ e_max = max_ses - 1;
+ } else if (strcmp(e_type, "IE") == 0) {
+ e_min = max_ses;
+ e_max = max_ses + max_ies - 1;
+ } else if (strcmp(e_type, "AE") == 0) {
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else if (strcmp(e_type, "all") == 0) {
+ e_min = 0;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else {
+ return -EINVAL;
+ }
+
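+	/* CPT_AF_EXEX_STS bit 0 is reported as busy and bit 1 as free below;
+	 * SE engines land in word 0 of the bitmaps, the remaining engines in
+	 * word 1.
+	 */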
+ for (e = e_min; e <= e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
+ if (reg & 0x1) {
+ if (e < max_ses)
+ busy_sts[0] |= 1ULL << e;
+			else
+ busy_sts[1] |= 1ULL << (e - max_ses);
+ }
+ if (reg & 0x2) {
+ if (e < max_ses)
+ free_sts[0] |= 1ULL << e;
+			else
+ free_sts[1] |= 1ULL << (e - max_ses);
+ }
+ }
+ seq_printf(filp, "FREE STS : 0x%016llx 0x%016llx\n", free_sts[1],
+ free_sts[0]);
+ seq_printf(filp, "BUSY STS : 0x%016llx 0x%016llx\n", busy_sts[1],
+ busy_sts[0]);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_sts, cpt_engines_sts_display,
+ cpt_engines_sts_write);
+
+static ssize_t rvu_dbg_cpt_engines_info_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_cpt_cmd_parser(filp, buffer, count, ppos);
+}
+
+static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ struct dentry *current_dir;
+ u32 e_min, e_max, e;
+ int blkaddr;
+ char *e_type;
+ u64 reg;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_type = rvu->rvu_dbg.cpt_ctx.e_type;
+
+ if (strcmp(e_type, "SE") == 0) {
+ e_min = 0;
+ e_max = max_ses - 1;
+ } else if (strcmp(e_type, "IE") == 0) {
+ e_min = max_ses;
+ e_max = max_ses + max_ies - 1;
+ } else if (strcmp(e_type, "AE") == 0) {
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else if (strcmp(e_type, "all") == 0) {
+ e_min = 0;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else {
+ return -EINVAL;
+ }
+
+ seq_puts(filp, "===========================================\n");
+ for (e = e_min; e <= e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
+ seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
+ reg & 0xff);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
+ seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
+ seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
+ reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display,
+ cpt_engines_info_write);
+
+static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct dentry *current_dir;
+ struct rvu_block *block;
+ int blkaddr;
+ u64 reg;
+ u32 lf;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return -ENODEV;
+
+ seq_puts(filp, "===========================================\n");
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
+
+ seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
+
+static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct dentry *current_dir;
+ u64 reg0, reg1;
+ int blkaddr;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
+ seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
+ seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
+
+static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
+{
+ struct dentry *current_dir;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 reg;
+
+ rvu = filp->private;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ seq_printf(filp, "CPT instruction requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ seq_printf(filp, "CPT instruction latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ seq_printf(filp, "CPT NCB read requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ seq_printf(filp, "CPT NCB read latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
+ seq_printf(filp, "CPT active cycles pc %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ seq_printf(filp, "CPT clock count pc %llu\n", reg);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
+
+static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
+{
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ if (blkaddr == BLKADDR_CPT0) {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ } else {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
+ rvu->rvu_dbg.root);
+ }
+ debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_pc_fops);
+ debugfs_create_file("cpt_engines_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_sts_fops);
+ debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_info_fops);
+ debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_lfs_info_fops);
+ debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_err_info_fops);
+}
+
RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
@@ -1738,57 +3394,255 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
-static void rvu_dbg_npc_init(struct rvu *rvu)
+static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ seq_printf(s, "\t%s ", npc_get_field_name(bit));
+ switch (bit) {
+ case NPC_DMAC:
+ seq_printf(s, "%pM ", rule->packet.dmac);
+ seq_printf(s, "mask %pM\n", rule->mask.dmac);
+ break;
+ case NPC_SMAC:
+ seq_printf(s, "%pM ", rule->packet.smac);
+ seq_printf(s, "mask %pM\n", rule->mask.smac);
+ break;
+ case NPC_ETYPE:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
+ break;
+ case NPC_OUTER_VID:
+ case NPC_FDSA_VAL:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_tci));
+ break;
+ case NPC_TOS:
+ seq_printf(s, "%d ", rule->packet.tos);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tos);
+ break;
+ case NPC_SIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4src);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
+ break;
+ case NPC_DIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
+ break;
+ case NPC_SIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6src);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
+ break;
+ case NPC_DIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6dst);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
+ break;
+ case NPC_SPORT_TCP:
+ case NPC_SPORT_UDP:
+ case NPC_SPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.sport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
+ break;
+ case NPC_DPORT_TCP:
+ case NPC_DPORT_UDP:
+ case NPC_DPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.dport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
+ break;
+ default:
+ seq_puts(s, "\n");
+ break;
+ }
+ }
+}
- rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.npc)
- return;
+static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ if (is_npc_intf_tx(rule->intf)) {
+ switch (rule->tx_action.op) {
+ case NIX_TX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_DEFAULT:
+ seq_puts(s, "\taction: Unicast to default channel\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_CHAN:
+ seq_printf(s, "\taction: Unicast to channel %d\n",
+ rule->tx_action.index);
+ break;
+ case NIX_TX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ case NIX_TX_ACTIONOP_DROP_VIOL:
+ seq_puts(s, "\taction: Lockdown Violation Drop\n");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rule->rx_action.op) {
+ case NIX_RX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST:
+ seq_printf(s, "\taction: Direct to queue %d\n",
+ rule->rx_action.index);
+ break;
+ case NIX_RX_ACTIONOP_RSS:
+ seq_puts(s, "\taction: RSS\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST_IPSEC:
+ seq_puts(s, "\taction: Unicast ipsec\n");
+ break;
+ case NIX_RX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
- pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
- rvu, &rvu_dbg_npc_mcam_info_fops);
- if (!pfile)
- goto create_failed;
+static const char *rvu_dbg_get_intf_name(int intf)
+{
+ switch (intf) {
+ case NIX_INTFX_RX(0):
+ return "NIX0_RX";
+ case NIX_INTFX_RX(1):
+ return "NIX1_RX";
+ case NIX_INTFX_TX(0):
+ return "NIX0_TX";
+ case NIX_INTFX_TX(1):
+ return "NIX1_TX";
+ default:
+ break;
+ }
- pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
- rvu, &rvu_dbg_npc_rx_miss_act_fops);
- if (!pfile)
- goto create_failed;
+ return "unknown";
+}
- return;
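+/* Walk the software list of installed MCAM rules and print the match
+ * fields, action, enable state and hit counter of each rule (exposed via
+ * the "mcam_rules" debugfs file created below).
+ */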
+static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
+{
+ struct rvu_npc_mcam_rule *iter;
+ struct rvu *rvu = s->private;
+ struct npc_mcam *mcam;
+ int pf, vf = -1;
+ bool enabled;
+ int blkaddr;
+ u16 target;
+ u64 hits;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ mcam = &rvu->hw->mcam;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\n\tInstalled by: PF%d ", pf);
+
+ if (iter->owner & RVU_PFVF_FUNC_MASK) {
+ vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+
+ seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
+ "RX" : "TX");
+ seq_printf(s, "\tinterface: %s\n",
+ rvu_dbg_get_intf_name(iter->intf));
+ seq_printf(s, "\tmcam entry: %d\n", iter->entry);
+
+ rvu_dbg_npc_mcam_show_flows(s, iter);
+ if (is_npc_intf_rx(iter->intf)) {
+ target = iter->rx_action.pf_func;
+ pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\tForward to: PF%d ", pf);
+
+ if (target & RVU_PFVF_FUNC_MASK) {
+ vf = (target & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+ seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
+ seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
+ }
+
+ rvu_dbg_npc_mcam_show_action(s, iter);
+
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
+ seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
+
+ if (!iter->has_cntr)
+ continue;
+ seq_printf(s, "\tcounter: %d\n", iter->cntr);
+
+ hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
+ seq_printf(s, "\thits: %lld\n", hits);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
+ debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_info_fops);
+ debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_rules_fops);
+ debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_rx_miss_act_fops);
+}
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
- debugfs_remove_recursive(rvu->rvu_dbg.npc);
+static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
+{
+ if (!is_rvu_otx2(rvu))
+ return "cn10k";
+ else
+ return "octeontx2";
}
void rvu_dbg_init(struct rvu *rvu)
{
- struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
- rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
- if (!rvu->rvu_dbg.root) {
- dev_err(rvu->dev, "%s failed\n", __func__);
- return;
- }
- pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
- &rvu_dbg_rsrc_status_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+
+ if (!is_rvu_otx2(rvu))
+ debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_lmtst_map_table_fops);
+ if (!cgx_get_cgxcnt_max())
+ goto create;
+
+ if (is_rvu_otx2(rvu))
+ debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+ else
+ debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+
+create:
rvu_dbg_npa_init(rvu);
- rvu_dbg_nix_init(rvu);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
+ rvu_dbg_sso_init(rvu);
- return;
-
-create_failed:
- dev_err(dev, "Failed to create debugfs dir\n");
- debugfs_remove_recursive(rvu->rvu_dbg.root);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
}
void rvu_dbg_exit(struct rvu *rvu)