Diffstat (limited to 'drivers/soc/marvell')
-rw-r--r--  drivers/soc/marvell/Kconfig                      |   85
-rw-r--r--  drivers/soc/marvell/Makefile                     |    4
-rw-r--r--  drivers/soc/marvell/mdio_debugfs.c               |  119
-rw-r--r--  drivers/soc/marvell/octeontx2-dpi/Makefile       |    8
-rw-r--r--  drivers/soc/marvell/octeontx2-dpi/dpi.c          |  525
-rw-r--r--  drivers/soc/marvell/octeontx2-dpi/dpi.h          |  246
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/Makefile        |   11
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/domain_sysfs.c  |  832
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/domain_sysfs.h  |   18
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/otx2_rm.c       | 1581
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/otx2_rm.h       |   95
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/quota.c         |  192
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/quota.h         |   90
13 files changed, 3806 insertions(+), 0 deletions(-)
diff --git a/drivers/soc/marvell/Kconfig b/drivers/soc/marvell/Kconfig
new file mode 100644
index 000000000000..67bd5ca92b18
--- /dev/null
+++ b/drivers/soc/marvell/Kconfig
@@ -0,0 +1,85 @@
+#
+# MARVELL SoC drivers
+#
+
+menu "Marvell SoC drivers"
+
+config OCTEONTX2_RM
+ tristate "OcteonTX2 RVU Resource Manager driver"
+ depends on OCTEONTX2_AF
+ ---help---
+ This driver offers resource management interfaces for Marvell's
+ OcteonTX2 Resource Virtualization Unit SSO/TIM PFs which are used
+ for interfacing with non-NIC hardware offload units.
+
+config OCTEONTX2_RM_DOM_SYSFS
+ bool "OcteonTX2 RVU Resource Manager domain sysfs"
+ depends on OCTEONTX2_RM
+ ---help---
+ Enable Application Domain sysfs which simplifies management of
+ SSO/TIM VFs and OcteonTX2 RVU based NIC devices by the system
+ administrator. This interface consists of the following files:
+
+ I. /sys/bus/pci/drivers/octeontx2-rm/0*/create_domain
+
+ Writing to this file will:
+ 1. Create a domain directory in /sys/bus/pci/drivers/octeontx2-rm/0*
+ with the domain name
+ 2. Reserve one of the SSO/TIM VFs for this domain and set its limits
+ according to the specification passed in the write string
+ 3. Create symlinks to all devices that will be part of the domain
+ in the directory created in point 1
+ 4. Create domain_id file returning the ID assigned to this domain
+ (effectively the domain name)
+ 5. Create a domain_in_use file which reports the state of the
+ domain's SSO/TIM device's in_use flag, indicating when the
+ domain is used by an application.
+
+ The syntax for writing into this file is:
+
+ name;param:val(;param:val)*
+
+ * name - domain name
+ * param - parameter name, based on the parameter, its value 'val'
+ has to have a different format:
+ * sso, ssow, npa, tim, cpt - 'val' is an integer value of the
+ number of LFs to assign to the domain
+ * port - 'val' is in 'DDDD:BB:DD.F' format and specifies device
+ representing a port.
+
+ The following rules apply when creating a domain:
+
+ 1. Domain names must be unique
+ 2. Each domain must have at least 1 NPA and 1 SSOW LF
+ 3. A port may be assigned to only a single domain
+
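+ For example (run from the driver's sysfs directory listed in
+ point I; the LF counts and port address are illustrative):
+
+ echo "dom0;npa:1;ssow:2;sso:2;port:0002:02:00.0" > create_domain
+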
+ II. /sys/bus/pci/drivers/octeontx2-rm/0*/destroy_domain
+
+ Writing a domain name to this file will cause the given domain to
+ be removed from sysfs. This includes:
+ 1. Setting all limits of domain's SSO/TIM device to 0
+ 2. Removing all sysfs structures for this domain
+ 3. Removing all ports in this application domain from the list of
+ ports in use.
+
+ Removal of a domain is disabled while the domain is in use, that
+ is, while the 'in_use' flag of the domain's SSO/TIM device is set.
+ User/admin may query the status of this flag using the
+ 'domain_in_use' file in the domain's sysfs directory.
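+
+ For example, the domain created above can be removed, provided it
+ is not in use, with:
+
+ echo "dom0" > destroy_domain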
+
+config OCTEONTX2_DPI_PF
+ tristate "OcteonTX2 DPI-DMA PF driver"
+ depends on ARM64 && PCI
+ ---help---
+ Select this option to enable DPI PF driver support.
+ DPI (DMA Packet Interface) provides DMA support for the MAC.
+ This driver initializes the DPI PF device and enables its VFs to
+ support different types of DMA transfers.
+
+config MDIO_DEBUGFS
+ tristate "Stub driver for debugfs support for MDIO commands"
+ depends on OCTEONTX2_AF
+ help
+ Provides debugfs support to initiate MDIO commands via SMC calls
+ to the ATF (ARM Trusted Firmware).
+endmenu
diff --git a/drivers/soc/marvell/Makefile b/drivers/soc/marvell/Makefile
new file mode 100644
index 000000000000..cdc062497815
--- /dev/null
+++ b/drivers/soc/marvell/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += octeontx2-rm/
+obj-y += octeontx2-dpi/
+obj-$(CONFIG_MDIO_DEBUGFS) += mdio_debugfs.o
diff --git a/drivers/soc/marvell/mdio_debugfs.c b/drivers/soc/marvell/mdio_debugfs.c
new file mode 100644
index 000000000000..652108960499
--- /dev/null
+++ b/drivers/soc/marvell/mdio_debugfs.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/arm-smccc.h>
+
+#define OCTEONTX_MDIO_DBG_READ 0xc2000d01
+#define OCTEONTX_MDIO_DBG_WRITE 0xc2000d02
+
+static struct dentry *pfile;
+static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
+ const char __user *buffer,
+ int *a, bool *write)
+{
+ int bytes_not_copied;
+ char *subtoken;
+ int ret, i;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ for (i = 0; i < 5; i++) {
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &a[i]) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf) {
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &a[i]) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ *write = true;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+ return ret;
+}
+
+static ssize_t dbg_mdio_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ bool write = false;
+ char *cmd_buf;
+ int ret, a[6];
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, a, &write);
+ if (ret < 0) {
+ pr_info("Usage: echo <cgxlmac> <mode> <addr> <devad> <reg> [value] > mdio_cmd\n");
+ goto done;
+ } else {
+ if (write)
+ arm_smccc_smc(OCTEONTX_MDIO_DBG_WRITE, a[0], a[1], a[2],
+ a[3], a[4], a[5], 0, &res);
+ else
+ arm_smccc_smc(OCTEONTX_MDIO_DBG_READ, a[0], a[1], a[2],
+ a[3], a[4], 0, 0, &res);
+ pr_info("MDIO COMMAND RESULT\n");
+ pr_info("===================\n");
+ pr_info("res[0]:\t%ld\n", res.a0);
+ pr_info("res[1]:\t%ld\n", res.a1);
+ pr_info("res[2]:\t%ld\n", res.a2);
+ pr_info("res[3]:\t%ld\n", res.a3);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static const struct file_operations dbg_mdio_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = dbg_mdio_write,
+};
+
+static int dbg_mdio_init(void)
+{
+ pfile = debugfs_create_file("mdio_cmd", 0644, NULL, NULL,
+ &dbg_mdio_fops);
+ if (!pfile)
+ goto create_failed;
+ return 0;
+create_failed:
+ pr_err("Failed to create debugfs file for mdio_cmd\n");
+ return -ENOMEM;
+}
+
+static void dbg_mdio_exit(void)
+{
+ debugfs_remove_recursive(pfile);
+}
+module_init(dbg_mdio_init);
+module_exit(dbg_mdio_exit);
+#endif /* CONFIG_DEBUG_FS */
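
As a usage sketch for the debugfs interface above (argument values are
illustrative; the file is created at the debugfs root, typically mounted at
/sys/kernel/debug):

	/* Hypothetical user-space test for mdio_cmd; values are examples. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* <cgxlmac> <mode> <addr> <devad> <reg> -> MDIO read */
		const char *cmd = "0 1 0 1 2";
		int fd = open("/sys/kernel/debug/mdio_cmd", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, cmd, strlen(cmd)) < 0)
			perror("write"); /* result goes to the kernel log */
		close(fd);
		return 0;
	}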
diff --git a/drivers/soc/marvell/octeontx2-dpi/Makefile b/drivers/soc/marvell/octeontx2-dpi/Makefile
new file mode 100644
index 000000000000..73640517593c
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 DPI PF driver
+#
+
+obj-$(CONFIG_OCTEONTX2_DPI_PF) += octeontx2_dpi.o
+
+octeontx2_dpi-y := dpi.o
diff --git a/drivers/soc/marvell/octeontx2-dpi/dpi.c b/drivers/soc/marvell/octeontx2-dpi/dpi.c
new file mode 100644
index 000000000000..1be37f3b5d50
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/dpi.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 DPI PF driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+
+#include "dpi.h"
+
+#define DPI_DRV_NAME "octeontx2-dpi"
+#define DPI_DRV_STRING "Marvell OcteonTX2 DPI-DMA Driver"
+#define DPI_DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id dpi_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_DPI_PF) },
+ { 0, } /* end of table */
+};
+MODULE_DEVICE_TABLE(pci, dpi_id_table);
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DPI_DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DPI_DRV_VERSION);
+
+static void dpi_reg_write(struct dpipf *dpi, u64 offset, u64 val)
+{
+ writeq(val, dpi->reg_base + offset);
+}
+
+static u64 dpi_reg_read(struct dpipf *dpi, u64 offset)
+{
+ return readq(dpi->reg_base + offset);
+}
+
+static int dpi_dma_engine_get_num(void)
+{
+ return DPI_MAX_ENGINES;
+}
+
+int dpi_queue_init(struct dpipf *dpi, struct dpipf_vf *dpivf, u8 vf)
+{
+ int engine = 0;
+ int queue = vf;
+ u64 reg = 0ULL;
+ u32 aura = dpivf->vf_config.aura;
+ u16 buf_size = dpivf->vf_config.csize;
+ u16 sso_pf_func = dpivf->vf_config.sso_pf_func;
+ u16 npa_pf_func = dpivf->vf_config.npa_pf_func;
+
+ dpi_reg_write(dpi, DPI_DMAX_IBUFF_CSIZE(queue),
+ DPI_DMA_IBUFF_CSIZE_CSIZE((u64)(buf_size / 8)));
+
+ /* IDs are already configured while creating the domains,
+ * so there is no need to configure them here.
+ */
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+ /* Don't configure the queues for PKT engines */
+ if (engine >= 4)
+ break;
+
+ reg = 0;
+ reg = dpi_reg_read(dpi, DPI_DMA_ENGX_EN(engine));
+ reg |= DPI_DMA_ENG_EN_QEN(0x1 << queue);
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), reg);
+ }
+
+ reg = dpi_reg_read(dpi, DPI_DMAX_IDS2(queue));
+ reg |= DPI_DMA_IDS2_INST_AURA(aura);
+ dpi_reg_write(dpi, DPI_DMAX_IDS2(queue), reg);
+
+ reg = dpi_reg_read(dpi, DPI_DMAX_IDS(queue));
+ reg |= DPI_DMA_IDS_DMA_NPA_PF_FUNC(npa_pf_func);
+ reg |= DPI_DMA_IDS_DMA_SSO_PF_FUNC(sso_pf_func);
+ reg |= DPI_DMA_IDS_DMA_STRM(vf + 1);
+ reg |= DPI_DMA_IDS_INST_STRM(vf + 1);
+ dpi_reg_write(dpi, DPI_DMAX_IDS(queue), reg);
+
+ return 0;
+}
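+
+/* Example: for vf/queue 2 the loop above sets QEN bit 2 in
+ * DPI_DMA_ENG0_EN..DPI_DMA_ENG3_EN, mapping the queue to all four
+ * non-packet DMA engines.
+ */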
+
+int dpi_queue_fini(struct dpipf *dpi, struct dpipf_vf *dpivf, u8 vf)
+{
+ u64 reg = 0ULL;
+ int engine = 0;
+ int queue = vf;
+ u16 buf_size = dpivf->vf_config.csize;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+ /* Don't configure the queues for PKT engines */
+ if (engine >= 4)
+ break;
+
+ reg = 0;
+ reg = dpi_reg_read(dpi, DPI_DMA_ENGX_EN(engine));
+ reg &= DPI_DMA_ENG_EN_QEN((~(1 << queue)));
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), reg);
+ }
+
+ dpi_reg_write(dpi, DPI_DMAX_QRST(queue), 0x1ULL);
+ /* TBD: is the write below required? */
+ dpi_reg_write(dpi, DPI_DMAX_IBUFF_CSIZE(queue),
+ DPI_DMA_IBUFF_CSIZE_CSIZE((u64)(buf_size)));
+
+ /* Reset IDS and IDS2 registers */
+ dpi_reg_write(dpi, DPI_DMAX_IDS2(queue), 0ULL);
+ dpi_reg_write(dpi, DPI_DMAX_IDS(queue), 0ULL);
+
+ return 0;
+}
+
+/**
+ * dpi_init() - Global one-time initialization of the DPI block
+ * @dpi: DPI device to initialize
+ *
+ * Return: Zero on success, negative on failure.
+ */
+int dpi_init(struct dpipf *dpi)
+{
+ int engine = 0;
+ u64 reg = 0ULL;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+ if (engine == 4 || engine == 5)
+ reg = DPI_ENG_BUF_BLKS(8);
+ else
+ reg = DPI_ENG_BUF_BLKS(4);
+
+ dpi_reg_write(dpi, DPI_ENGX_BUF(engine), reg);
+
+ /* Here the qmap for each engine is set to 0, so no DPI
+ * queues are mapped to engines. When a VF is initialized,
+ * the corresponding bit in the qmap is set for all engines.
+ */
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), 0x0ULL);
+ }
+
+ reg = 0ULL;
+ reg = (DPI_DMA_CONTROL_ZBWCSEN | DPI_DMA_CONTROL_PKT_EN |
+ DPI_DMA_CONTROL_LDWB | DPI_DMA_CONTROL_O_MODE |
+ DPI_DMA_CONTROL_DMA_ENB(0xfULL));
+
+ dpi_reg_write(dpi, DPI_DMA_CONTROL, reg);
+ dpi_reg_write(dpi, DPI_CTL, DPI_CTL_EN);
+
+ return 0;
+}
+
+int dpi_fini(struct dpipf *dpi)
+{
+ int engine = 0;
+ u64 reg = 0ULL;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+
+ dpi_reg_write(dpi, DPI_ENGX_BUF(engine), reg);
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), 0x0ULL);
+ }
+
+ reg = 0ULL;
+ dpi_reg_write(dpi, DPI_DMA_CONTROL, reg);
+ dpi_reg_write(dpi, DPI_CTL, ~DPI_CTL_EN);
+
+ return 0;
+}
+
+int dpi_queue_reset(struct dpipf *dpi, u16 queue)
+{
+ /* TODO: add support */
+ return 0;
+}
+
+static irqreturn_t dpi_pf_intr_handler(int irq, void *dpi_irq)
+{
+ u64 reg_val = 0;
+ int i = 0;
+ struct dpipf *dpi = (struct dpipf *)dpi_irq;
+
+ dev_err(&dpi->pdev->dev, "intr received: %d\n", irq);
+
+ /* Extract the MSI-X vector number from the irq number. */
+ while (irq != pci_irq_vector(dpi->pdev, i)) {
+ i++;
+ if (i >= dpi->num_vec)
+ break;
+ }
+ if (i < DPI_REQQX_INT_IDX) {
+ reg_val = dpi_reg_read(dpi, DPI_DMA_CCX_INT(i));
+ dev_err(&dpi->pdev->dev, "DPI_CC%d_INT raised: 0x%016llx\n",
+ i, reg_val);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), 0x1ULL);
+ } else if (i < DPI_SDP_FLR_RING_LINTX_IDX) {
+ reg_val = dpi_reg_read(
+ dpi, DPI_REQQX_INT(i - DPI_REQQX_INT_IDX));
+ dev_err(&dpi->pdev->dev,
+ "DPI_REQQ_INT raised for q:%d: 0x%016llx\n",
+ (i - DPI_REQQX_INT_IDX), reg_val);
+
+ dpi_reg_write(
+ dpi, DPI_REQQX_INT(i - DPI_REQQX_INT_IDX), reg_val);
+
+ if (reg_val & (0x71ULL))
+ dpi_queue_reset(dpi, (i - DPI_REQQX_INT_IDX));
+ } else if (i < DPI_SDP_IRE_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_FLR_RING_LINTX raised\n");
+
+ } else if (i < DPI_SDP_ORE_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_IRE_LINTX raised\n");
+
+ } else if (i < DPI_SDP_ORD_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_ORE_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_PP_VF_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_ORD_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_DMA_VF_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_PP_VF_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_MISC_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_DMA_VF_LINTX raised\n");
+
+ } else if (i < DPI_PF_RAS_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_MISC_LINTX raised\n");
+
+ } else if (i == DPI_PF_RAS_IDX) {
+ reg_val = dpi_reg_read(dpi, DPI_PF_RAS);
+ dev_err(&dpi->pdev->dev, "DPI_PF_RAS raised: 0x%016llx\n",
+ reg_val);
+ dpi_reg_write(dpi, DPI_PF_RAS, reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+static int dpi_irq_init(struct dpipf *dpi)
+{
+ int i, irq = 0;
+ int ret = 0;
+
+ /* Clear All Interrupts */
+ dpi_reg_write(dpi, DPI_PF_RAS, DPI_PF_RAS_INT);
+
+ /* Clear All Enables */
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1C, DPI_PF_RAS_INT);
+
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++) {
+ dpi_reg_write(dpi, DPI_REQQX_INT(i), DPI_REQQ_INT);
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1C(i), DPI_REQQ_INT);
+ }
+
+ for (i = 0; i < DPI_MAX_CC_INT; i++) {
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), DPI_DMA_CC_INT);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT_ENA_W1C(i), DPI_DMA_CC_INT);
+ }
+
+ dpi->num_vec = pci_msix_vec_count(dpi->pdev);
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(dpi->pdev, dpi->num_vec,
+ dpi->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&dpi->pdev->dev,
+ "DPIPF: Request for %d msix vectors failed, ret %d\n",
+ dpi->num_vec, ret);
+ goto alloc_fail;
+ }
+
+ for (irq = 0; irq < dpi->num_vec; irq++) {
+ ret = request_irq(pci_irq_vector(dpi->pdev, irq),
+ dpi_pf_intr_handler, 0, "DPIPF", dpi);
+ if (ret) {
+ dev_err(&dpi->pdev->dev,
+ "DPIPF: IRQ(%d) registration failed for DPIPF\n",
+ irq);
+ goto fail;
+ }
+ }
+
+#define ENABLE_DPI_INTERRUPTS 0
+#if ENABLE_DPI_INTERRUPTS
+ /*Enable All Interrupts */
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++)
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1S(i), DPI_REQQ_INT);
+
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1S, DPI_PF_RAS_INT);
+#endif
+ return 0;
+fail:
+ if (irq) {
+ for (i = 0; i < irq; i++)
+ free_irq(pci_irq_vector(dpi->pdev, i), dpi);
+ }
+ pci_free_irq_vectors(dpi->pdev);
+alloc_fail:
+ dpi->num_vec = 0;
+ return ret;
+}
+
+static void dpi_irq_free(struct dpipf *dpi)
+{
+ int i = 0;
+
+ /* Clear All Enables */
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1C, DPI_PF_RAS_INT);
+
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++) {
+ dpi_reg_write(dpi, DPI_REQQX_INT(i), DPI_REQQ_INT);
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1C(i), DPI_REQQ_INT);
+ }
+
+ for (i = 0; i < DPI_MAX_CC_INT; i++) {
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), DPI_DMA_CC_INT);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT_ENA_W1C(i), DPI_DMA_CC_INT);
+ }
+
+ for (i = 0; i < dpi->num_vec; i++)
+ free_irq(pci_irq_vector(dpi->pdev, i), dpi);
+
+ pci_free_irq_vectors(dpi->pdev);
+ dpi->num_vec = 0;
+}
+
+static int dpi_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (numvfs == 0) {
+ pci_disable_sriov(pdev);
+ dpi->total_vfs = 0;
+ } else {
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret == 0) {
+ dpi->total_vfs = numvfs;
+ ret = numvfs;
+ }
+ }
+
+ return ret;
+}
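+
+/* VFs are created through the standard SR-IOV sysfs interface, which ends
+ * up in the handler above, e.g. (PF address illustrative):
+ *
+ *   echo 8 > /sys/bus/pci/devices/0000:05:00.0/sriov_numvfs
+ */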
+
+static ssize_t dpi_show_config(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ int vf_idx;
+
+ buf[0] = '\0';
+ for (vf_idx = 0; vf_idx < DPI_MAX_VFS; vf_idx++) {
+ struct dpipf_vf *dpivf = &dpi->vf[vf_idx];
+
+ if (!dpivf->setup_done)
+ continue;
+ sprintf(buf + strlen(buf),
+ "VF:%d command buffer size:%d aura:%d ",
+ vf_idx, dpivf->vf_config.csize, dpivf->vf_config.aura);
+ sprintf(buf + strlen(buf),
+ "sso_pf_func:%x npa_pf_func:%x\n",
+ dpivf->vf_config.sso_pf_func,
+ dpivf->vf_config.npa_pf_func);
+ }
+ return strlen(buf);
+}
+
+static ssize_t dpi_write_config(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ union dpi_mbox_message_t mbox_msg = {.u[0] = 0ULL, .u[1] = 0ULL};
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ struct dpipf_vf *dpivf;
+
+ if (count > sizeof(mbox_msg))
+ return -EINVAL;
+ memcpy(&mbox_msg, buf, count);
+ if (mbox_msg.s.vfid >= DPI_MAX_VFS) {
+ dev_err(dev, "Invalid vfid:%d\n", (int)mbox_msg.s.vfid);
+ return -EINVAL;
+ }
+ dpivf = &dpi->vf[mbox_msg.s.vfid];
+
+ switch (mbox_msg.s.cmd) {
+ case DPI_QUEUE_OPEN:
+ dpivf->vf_config.aura = mbox_msg.s.aura;
+ dpivf->vf_config.csize = mbox_msg.s.csize;
+ dpivf->vf_config.sso_pf_func = mbox_msg.s.sso_pf_func;
+ dpivf->vf_config.npa_pf_func = mbox_msg.s.npa_pf_func;
+ dpi_queue_init(dpi, dpivf, mbox_msg.s.vfid);
+ dpivf->setup_done = true;
+ break;
+ case DPI_QUEUE_CLOSE:
+ dpivf->vf_config.aura = 0;
+ dpivf->vf_config.csize = 0;
+ dpivf->vf_config.sso_pf_func = 0;
+ dpivf->vf_config.npa_pf_func = 0;
+ dpi_queue_fini(dpi, dpivf, mbox_msg.s.vfid);
+ dpivf->setup_done = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sizeof(mbox_msg);
+}
+
+static DEVICE_ATTR(dpi_device_config, 0660,
+ dpi_show_config, dpi_write_config);
+
+static int dpi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct dpipf *dpi;
+ int err;
+
+ dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+ if (!dpi)
+ return -ENOMEM;
+ dpi->pdev = pdev;
+
+ pci_set_drvdata(pdev, dpi);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DPI_DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ dpi->reg_base = pcim_iomap(pdev, PCI_DPI_PF_CFG_BAR, 0);
+ if (!dpi->reg_base) {
+ dev_err(dev, "DPI: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Initialize global PF registers */
+ err = dpi_init(dpi);
+ if (err) {
+ dev_err(dev, "DPI: Failed to initialize dpi\n");
+ goto err_release_regions;
+ }
+
+ /* Register interrupts */
+ err = dpi_irq_init(dpi);
+ if (err) {
+ dev_err(dev, "DPI: Failed to initialize irq vectors\n");
+ goto err_dpi_fini;
+ }
+
+ err = device_create_file(dev, &dev_attr_dpi_device_config);
+ if (err) {
+ dev_err(dev, "DPI: Failed to create sysfs entry for driver\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ dpi_irq_free(dpi);
+err_dpi_fini:
+ dpi_fini(dpi);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, dpi);
+ return err;
+}
+
+static void dpi_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+
+ device_remove_file(dev, &dev_attr_dpi_device_config);
+ dpi_irq_free(dpi);
+ dpi_fini(dpi);
+ dpi_sriov_configure(pdev, 0);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, dpi);
+}
+
+static struct pci_driver dpi_driver = {
+ .name = DPI_DRV_NAME,
+ .id_table = dpi_id_table,
+ .probe = dpi_probe,
+ .remove = dpi_remove,
+ .sriov_configure = dpi_sriov_configure,
+};
+
+static int __init dpi_init_module(void)
+{
+ pr_info("%s: %s\n", DPI_DRV_NAME, DPI_DRV_STRING);
+
+ return pci_register_driver(&dpi_driver);
+}
+
+static void __exit dpi_cleanup_module(void)
+{
+ pci_unregister_driver(&dpi_driver);
+}
+
+module_init(dpi_init_module);
+module_exit(dpi_cleanup_module);
diff --git a/drivers/soc/marvell/octeontx2-dpi/dpi.h b/drivers/soc/marvell/octeontx2-dpi/dpi.h
new file mode 100644
index 000000000000..2d0cf04524b7
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/dpi.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 DPI PF driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DPI_H__
+#define __DPI_H__
+
+ /* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_DPI_PF 0xA080
+#define PCI_DEVID_OCTEONTX2_DPI_VF 0xA081
+
+/* PCI BAR nos */
+#define PCI_DPI_PF_CFG_BAR 0
+#define PCI_DPI_PF_MSIX_BAR 4
+#define PCI_DPI_VF_CFG_BAR 0
+#define PCI_DPI_VF_MSIX_BAR 4
+#define DPI_VF_CFG_SIZE 0x100000
+#define DPI_VF_OFFSET(x) (0x20000000 | 0x100000 * (x))
+
+/* MSI-X interrupts */
+#define DPI_VF_MSIX_COUNT 1
+#define DPI_MAX_REQQ_INT 8
+#define DPI_MAX_CC_INT 64
+
+/* MSI-X interrupt vectors indexes */
+#define DPI_CCX_INT_IDX 0x0
+#define DPI_REQQX_INT_IDX 0x40
+#define DPI_SDP_FLR_RING_LINTX_IDX 0x48
+#define DPI_SDP_IRE_LINTX_IDX 0x4C
+#define DPI_SDP_ORE_LINTX_IDX 0x50
+#define DPI_SDP_ORD_LINTX_IDX 0x54
+#define DPI_EPFX_PP_VF_LINTX_IDX 0x58
+#define DPI_EPFX_DMA_VF_LINTX_IDX 0x78
+#define DPI_EPFX_MISC_LINTX_IDX 0x98
+#define DPI_PF_RAS_IDX 0xA8
+
+#define DPI_MAX_ENGINES 6
+#define DPI_MAX_VFS 8
+
+/**************** Macros for register modification ************/
+#define DPI_DMA_IBUFF_CSIZE_CSIZE(x) ((x) & 0x1fff)
+#define DPI_DMA_IBUFF_CSIZE_GET_CSIZE(x) ((x) & 0x1fff)
+
+#define DPI_DMA_IDS_INST_STRM(x) ((uint64_t)((x) & 0xff) << 40)
+#define DPI_DMA_IDS_GET_INST_STRM(x) (((x) >> 40) & 0xff)
+
+#define DPI_DMA_IDS_DMA_STRM(x) ((uint64_t)((x) & 0xff) << 32)
+#define DPI_DMA_IDS_GET_DMA_STRM(x) (((x) >> 32) & 0xff)
+
+#define DPI_DMA_IDS_DMA_NPA_PF_FUNC(x) ((uint64_t)((x) & 0xffff) << 16)
+#define DPI_DMA_IDS_GET_DMA_NPA_PF_FUNC(x) (((x) >> 16) & 0xffff)
+
+#define DPI_DMA_IDS_DMA_SSO_PF_FUNC(x) ((uint64_t)((x) & 0xffff))
+#define DPI_DMA_IDS_GET_DMA_SSO_PF_FUNC(x) ((x) & 0xffff)
+
+#define DPI_DMA_IDS2_INST_AURA(x) ((uint64_t)((x) & 0xfffff))
+#define DPI_DMA_IDS2_GET_INST_AURA(x) ((x) & 0xfffff)
+
+#define DPI_ENG_BUF_BLKS(x) ((x) & 0x1fULL)
+#define DPI_ENG_BUF_GET_BLKS(x) ((x) & 0x1fULL)
+
+#define DPI_ENG_BUF_BASE(x) (((x) & 0x3fULL) << 16)
+#define DPI_ENG_BUF_GET_BASE(x) (((x) >> 16) & 0x3fULL)
+
+#define DPI_DMA_ENG_EN_QEN(x) ((x) & 0xffULL)
+#define DPI_DMA_ENG_EN_GET_QEN(x) ((x) & 0xffULL)
+
+#define DPI_DMA_ENG_EN_MOLR(x) (((x) & 0x3ffULL) << 32)
+#define DPI_DMA_ENG_EN_GET_MOLR(x) (((x) >> 32) & 0x3ffULL)
+
+#define DPI_DMA_CONTROL_DMA_ENB(x) (((x) & 0x3fULL) << 48)
+#define DPI_DMA_CONTROL_GET_DMA_ENB(x) (((x) >> 48) & 0x3fULL)
+
+#define DPI_DMA_CONTROL_O_ES(x) (((x) & 0x3ULL) << 15)
+#define DPI_DMA_CONTROL_GET_O_ES(x) (((x) >> 15) & 0x3ULL)
+
+#define DPI_DMA_CONTROL_O_MODE (0x1ULL << 14)
+#define DPI_DMA_CONTROL_O_NS (0x1ULL << 17)
+#define DPI_DMA_CONTROL_O_RO (0x1ULL << 18)
+#define DPI_DMA_CONTROL_O_ADD1 (0x1ULL << 19)
+#define DPI_DMA_CONTROL_LDWB (0x1ULL << 32)
+#define DPI_DMA_CONTROL_NCB_TAG_DIS (0x1ULL << 34)
+#define DPI_DMA_CONTROL_ZBWCSEN (0x1ULL << 39)
+#define DPI_DMA_CONTROL_WQECSDIS (0x1ULL << 47)
+#define DPI_DMA_CONTROL_UIO_DIS (0x1ULL << 55)
+#define DPI_DMA_CONTROL_PKT_EN (0x1ULL << 56)
+#define DPI_DMA_CONTROL_FFP_DIS (0x1ULL << 59)
+
+#define DPI_CTL_EN (0x1ULL)
+
+/******************** macros for Interrupts ************************/
+#define DPI_DMA_CC_INT (0x1ULL)
+
+#define DPI_REQQ_INT_INSTRFLT (0x1ULL)
+#define DPI_REQQ_INT_RDFLT (0x1ULL << 1)
+#define DPI_REQQ_INT_WRFLT (0x1ULL << 2)
+#define DPI_REQQ_INT_CSFLT (0x1ULL << 3)
+#define DPI_REQQ_INT_INST_DBO (0x1ULL << 4)
+#define DPI_REQQ_INT_INST_ADDR_NULL (0x1ULL << 5)
+#define DPI_REQQ_INT_INST_FILL_INVAL (0x1ULL << 6)
+#define DPI_REQQ_INT_INSTR_PSN (0x1ULL << 7)
+
+#define DPI_REQQ_INT \
+ (DPI_REQQ_INT_INSTRFLT | \
+ DPI_REQQ_INT_RDFLT | \
+ DPI_REQQ_INT_WRFLT | \
+ DPI_REQQ_INT_CSFLT | \
+ DPI_REQQ_INT_INST_DBO | \
+ DPI_REQQ_INT_INST_ADDR_NULL | \
+ DPI_REQQ_INT_INST_FILL_INVAL | \
+ DPI_REQQ_INT_INSTR_PSN)
+
+#define DPI_PF_RAS_EBI_DAT_PSN (0x1ULL)
+#define DPI_PF_RAS_NCB_DAT_PSN (0x1ULL << 1)
+#define DPI_PF_RAS_NCB_CMD_PSN (0x1ULL << 2)
+#define DPI_PF_RAS_INT \
+ (DPI_PF_RAS_EBI_DAT_PSN | \
+ DPI_PF_RAS_NCB_DAT_PSN | \
+ DPI_PF_RAS_NCB_CMD_PSN)
+
+
+/***************** Registers ******************/
+#define DPI_DMAX_IBUFF_CSIZE(x) (0x0ULL | ((x) << 11))
+#define DPI_DMAX_REQBANK0(x) (0x8ULL | ((x) << 11))
+#define DPI_DMAX_REQBANK1(x) (0x10ULL | ((x) << 11))
+#define DPI_DMAX_IDS(x) (0x18ULL | ((x) << 11))
+#define DPI_DMAX_IDS2(x) (0x20ULL | ((x) << 11))
+#define DPI_DMAX_IFLIGHT(x) (0x28ULL | ((x) << 11))
+#define DPI_DMAX_QRST(x) (0x30ULL | ((x) << 11))
+#define DPI_DMAX_ERR_RSP_STATUS(x) (0x38ULL | ((x) << 11))
+
+#define DPI_CSCLK_ACTIVE_PC (0x4000ULL)
+#define DPI_CTL (0x4010ULL)
+#define DPI_DMA_CONTROL (0x4018ULL)
+#define DPI_DMA_ENGX_EN(x) (0x4040ULL | ((x) << 3))
+#define DPI_REQ_ERR_RSP (0x4078ULL)
+#define DPI_REQ_ERR_RSP_EN (0x4088ULL)
+#define DPI_PKT_ERR_RSP (0x4098ULL)
+#define DPI_NCB_CFG (0x40A0ULL)
+#define DPI_BP_TEST0 (0x40B0ULL)
+#define DPI_ENGX_BUF(x) (0x40C0ULL | ((x) << 3))
+#define DPI_EBUS_RECAL (0x40F0ULL)
+#define DPI_EBUS_PORTX_CFG(x) (0x4100ULL | ((x) << 3))
+#define DPI_EBUS_PORTX_SCFG(x) (0x4180ULL | ((x) << 3))
+#define DPI_EBUS_PORTX_ERR_INFO(x) (0x4200ULL | ((x) << 3))
+#define DPI_EBUS_PORTX_ERR(x) (0x4280ULL | ((x) << 3))
+#define DPI_INFO_REG (0x4300ULL)
+#define DPI_PF_RAS (0x4308ULL)
+#define DPI_PF_RAS_W1S (0x4310ULL)
+#define DPI_PF_RAS_ENA_W1C (0x4318ULL)
+#define DPI_PF_RAS_ENA_W1S (0x4320ULL)
+#define DPI_DMA_CCX_INT(x) (0x5000ULL | ((x) << 3))
+#define DPI_DMA_CCX_INT_W1S(x) (0x5400ULL | ((x) << 3))
+#define DPI_DMA_CCX_INT_ENA_W1C(x) (0x5800ULL | ((x) << 3))
+#define DPI_DMA_CCX_INT_ENA_W1S(x) (0x5C00ULL | ((x) << 3))
+#define DPI_DMA_CCX_CNT(x) (0x6000ULL | ((x) << 3))
+#define DPI_REQQX_INT(x) (0x6600ULL | ((x) << 3))
+#define DPI_REQQX_INT_W1S(x) (0x6640ULL | ((x) << 3))
+#define DPI_REQQX_INT_ENA_W1C(x) (0x6680ULL | ((x) << 3))
+#define DPI_REQQX_INT_ENA_W1S(x) (0x66C0ULL | ((x) << 3))
+#define DPI_EPFX_DMA_VF_LINTX(x, y) (0x6800ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_DMA_VF_LINTX_W1S(x, y) (0x6A00ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_DMA_VF_LINTX_ENA_W1C(x, y) (0x6C00ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_DMA_VF_LINTX_ENA_W1S(x, y) (0x6E00ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_MISC_LINT(x) (0x7000ULL | ((x) << 5))
+#define DPI_EPFX_MISC_LINT_W1S(x) (0x7008ULL | ((x) << 5))
+#define DPI_EPFX_MISC_LINT_ENA_W1C(x) (0x7010ULL | ((x) << 5))
+#define DPI_EPFX_MISC_LINT_ENA_W1S(x) (0x7018ULL | ((x) << 5))
+#define DPI_EPFX_PP_VF_LINTX(x, y) (0x7200ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_PP_VF_LINTX_W1S(x, y) (0x7400ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_PP_VF_LINTX_ENA_W1C(x, y) (0x7600ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_PP_VF_LINTX_ENA_W1S(x, y) (0x7800ULL | ((x) << 5) |\
+ ((y) << 4))
+/* VF Registers: */
+#define DPI_VDMA_EN (0x0ULL)
+#define DPI_VDMA_REQQ_CTL (0x8ULL)
+#define DPI_VDMA_DBELL (0x10ULL)
+#define DPI_VDMA_SADDR (0x18ULL)
+#define DPI_VDMA_COUNTS (0x20ULL)
+#define DPI_VDMA_NADDR (0x28ULL)
+#define DPI_VDMA_IWBUSY (0x30ULL)
+#define DPI_VDMA_CNT (0x38ULL)
+#define DPI_VF_INT (0x100ULL)
+#define DPI_VF_INT_W1S (0x108ULL)
+#define DPI_VF_INT_ENA_W1C (0x110ULL)
+#define DPI_VF_INT_ENA_W1S (0x118ULL)
+
+struct dpivf_config {
+ uint16_t csize;
+ uint32_t aura;
+ uint16_t sso_pf_func;
+ uint16_t npa_pf_func;
+};
+
+struct dpipf_vf {
+ uint8_t this_vfid;
+ bool setup_done;
+ struct dpivf_config vf_config;
+};
+
+struct dpipf {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ int num_vec;
+ struct msix_entry *msix_entries;
+ int total_vfs;
+ int vfs_in_use;
+ struct dpipf_vf vf[DPI_MAX_VFS];
+};
+
+#define DPI_QUEUE_OPEN 0x1
+#define DPI_QUEUE_CLOSE 0x2
+#define DPI_REG_DUMP 0x3
+#define DPI_GET_REG_CFG 0x4
+
+union dpi_mbox_message_t {
+ uint64_t u[2];
+ struct dpi_mbox_message_s {
+ /* VF ID to configure */
+ uint64_t vfid :4;
+ /* Command code */
+ uint64_t cmd :4;
+ /* Command buffer size in 8-byte words */
+ uint64_t csize :14;
+ /* aura of the command buffer */
+ uint64_t aura :20;
+ /* SSO PF function */
+ uint64_t sso_pf_func :16;
+ /* NPA PF function */
+ uint64_t npa_pf_func :16;
+ } s;
+};
+#endif
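
The dpi_device_config sysfs store in dpi.c consumes a raw image of union
dpi_mbox_message_t as defined above. A minimal user-space sketch of composing
a DPI_QUEUE_OPEN request follows (the PF address and field values are
illustrative, and the bitfield layout must match the kernel's packing on the
same ABI):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	union dpi_mbox_message_t {
		uint64_t u[2];
		struct {
			uint64_t vfid        :4;
			uint64_t cmd         :4;  /* DPI_QUEUE_OPEN = 0x1 */
			uint64_t csize       :14; /* 8-byte words */
			uint64_t aura        :20;
			uint64_t sso_pf_func :16;
			uint64_t npa_pf_func :16; /* placed in u[1] */
		} s;
	};

	int main(void)
	{
		union dpi_mbox_message_t msg = { .u = { 0, 0 } };
		int fd;

		msg.s.vfid = 0;
		msg.s.cmd = 0x1;	/* DPI_QUEUE_OPEN */
		msg.s.csize = 1024;	/* command buffer size */
		fd = open("/sys/bus/pci/devices/0000:05:00.0/dpi_device_config",
			  O_WRONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg))
			perror("write");
		close(fd);
		return 0;
	}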
diff --git a/drivers/soc/marvell/octeontx2-rm/Makefile b/drivers/soc/marvell/octeontx2-rm/Makefile
new file mode 100644
index 000000000000..bab787b56b43
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 SSO/TIM RVU device driver
+#
+
+obj-$(CONFIG_OCTEONTX2_RM) += octeontx2_rm.o
+
+octeontx2_rm-y := otx2_rm.o quota.o
+octeontx2_rm-$(CONFIG_OCTEONTX2_RM_DOM_SYSFS) += domain_sysfs.o
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+ccflags-y += -I$(srctree)/drivers/soc/marvell/octeontx2-dpi/
diff --git a/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c
new file mode 100644
index 000000000000..9101edea8118
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c
@@ -0,0 +1,832 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sysfs.h>
+#include "domain_sysfs.h"
+#include "otx2_rm.h"
+#include "dpi.h"
+
+#define DOMAIN_NAME_LEN 32
+#define PCI_SCAN_FMT "%04x:%02x:%02x.%02x"
+
+/* The format of DP is: DP(_name, _param_type, _scanf_fmt) */
+#define DOM_PARAM_SPEC \
+DP(ssow, int, "%d") \
+DP(sso, int, "%d") \
+DP(npa, int, "%d") \
+DP(cpt, int, "%d") \
+DP(tim, int, "%d") \
+DP(dpi, int, "%d")
+
+struct domain_params {
+ const char *name;
+#define DP(_name, _type, _1) \
+ _type _name;
+DOM_PARAM_SPEC
+#undef DP
+ const char *ports[RM_MAX_PORTS];
+ u16 port_cnt;
+};
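+
+/* With the DP() entries above, struct domain_params is equivalent to this
+ * expansion sketch:
+ *
+ *   struct domain_params {
+ *           const char *name;
+ *           int ssow, sso, npa, cpt, tim, dpi;
+ *           const char *ports[RM_MAX_PORTS];
+ *           u16 port_cnt;
+ *   };
+ */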
+
+struct domain {
+ char name[DOMAIN_NAME_LEN];
+ struct kobj_attribute domain_id;
+ struct kobj_attribute domain_in_use;
+ /* List of all ports attached to the domain */
+ struct rvu_port *ports;
+ struct kobject *kobj;
+ struct rvu_vf *rvf;
+ int port_count;
+ bool in_use;
+};
+
+struct rvu_port {
+ /* handle in global list of ports associated to all domains */
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct domain *domain;
+};
+
+struct dpi_vf {
+ struct pci_dev *pdev;
+ /* pointer to the kobject which owns this vf */
+ struct kobject *domain_kobj;
+ int vf_id;
+ bool in_use;
+};
+
+struct dpi_info {
+ /* Total number of vfs available */
+ uint8_t num_vfs;
+ /* Free vfs */
+ uint8_t vfs_free;
+ /* Pointer to the vfs available */
+ struct dpi_vf *dpi_vf;
+};
+
+struct domain_sysfs {
+ struct list_head list;
+ struct kobj_attribute create_domain;
+ struct kobj_attribute destroy_domain;
+ struct kobj_attribute pmccntr_el0;
+ /* List of all ports added to all domains. Used to validate that a
+ * newly created domain does not claim an already taken port.
+ */
+ struct list_head ports;
+ struct rm_dev *rdev;
+ struct kobject *parent;
+ struct domain *domains;
+ size_t domains_len;
+ struct dpi_info dpi_info;
+};
+
+static DEFINE_MUTEX(domain_sysfs_lock);
+static LIST_HEAD(domain_sysfs_list);
+
+static ssize_t
+domain_id_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct domain *dom = container_of(attr, struct domain, domain_id);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", dom->name);
+}
+
+static ssize_t
+domain_in_use_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct domain *dom = container_of(attr, struct domain, domain_in_use);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", dom->rvf->in_use);
+}
+
+static int do_destroy_domain(struct domain_sysfs *lsfs, struct domain *domain)
+{
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ int i;
+
+ if (domain->rvf->in_use) {
+ dev_err(dev, "Domain %s is in use.\n", domain->name);
+ return -EBUSY;
+ }
+
+ sysfs_remove_file(domain->kobj, &domain->domain_id.attr);
+ domain->domain_id.attr.mode = 0;
+ sysfs_remove_file(domain->kobj, &domain->domain_in_use.attr);
+ domain->domain_in_use.attr.mode = 0;
+ for (i = 0; i < domain->port_count; i++) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(domain->ports[i].pdev));
+ }
+
+ for (i = 0; i < lsfs->dpi_info.num_vfs; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[i];
+ /* Identify the devices belonging to this domain */
+ if (dpivf_ptr->in_use &&
+ dpivf_ptr->domain_kobj == domain->kobj) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(dpivf_ptr->pdev));
+ dpivf_ptr->in_use = false;
+ dpivf_ptr->domain_kobj = NULL;
+ lsfs->dpi_info.vfs_free++;
+ }
+ }
+
+ sysfs_remove_link(domain->kobj, pci_name(domain->rvf->pdev));
+ kobject_del(domain->kobj);
+ mutex_lock(&lsfs->rdev->lock);
+ /* Reset the VF's limits */
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = 0;
+ mutex_unlock(&lsfs->rdev->lock);
+
+ mutex_lock(&domain_sysfs_lock);
+ /* Free all allocated ports */
+ for (i = 0; i < domain->port_count; i++) {
+ list_del(&domain->ports[i].list);
+ pci_dev_put(domain->ports[i].pdev);
+ }
+ kfree(domain->ports);
+ domain->ports = NULL;
+ domain->port_count = 0;
+ domain->in_use = false;
+ domain->name[0] = '\0';
+ mutex_unlock(&domain_sysfs_lock);
+
+ return 0;
+}
+
+static int
+do_create_domain(struct domain_sysfs *lsfs, struct domain_params *dparams)
+{
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ struct domain *domain = NULL;
+ struct rvu_port *ports = NULL, *cur;
+ u32 dom, bus, slot, fn;
+ int old_sso, old_ssow, old_npa, old_cpt, old_tim, device;
+ int res = 0, i;
+
+ /* Validate parameters */
+ if (dparams == NULL)
+ return -EINVAL;
+ if (strnlen(dparams->name, DOMAIN_NAME_LEN) >= DOMAIN_NAME_LEN) {
+ dev_err(dev, "Domain name too long, max %d characters.\n",
+ DOMAIN_NAME_LEN);
+ return -EINVAL;
+ }
+ if (dparams->npa != 1) {
+ dev_err(dev, "Exactly 1 NPA resource required.\n");
+ return -EINVAL;
+ }
+ if (dparams->ssow < 1) {
+ dev_err(dev, "At least 1 SSOW resource required.\n");
+ return -EINVAL;
+ }
+ mutex_lock(&domain_sysfs_lock);
+ /* Find a free domain device */
+ for (i = 0; i < lsfs->domains_len; i++) {
+ if (!strncmp(lsfs->domains[i].name, dparams->name,
+ DOMAIN_NAME_LEN)) {
+ dev_err(dev, "Domain %s exists already.\n",
+ dparams->name);
+ res = -EINVAL;
+ goto err_dom;
+ }
+ if (lsfs->domains[i].in_use == false &&
+ lsfs->domains[i].rvf->in_use == false) {
+ if (domain == NULL)
+ domain = &lsfs->domains[i];
+ }
+ }
+ if (domain == NULL) {
+ dev_err(dev, "No free device to create new domain.\n");
+ res = -ENODEV;
+ goto err_dom;
+ }
+ strncpy(domain->name, dparams->name, DOMAIN_NAME_LEN - 1);
+ domain->in_use = true;
+ /* Verify ports are valid and supported. */
+ if (dparams->port_cnt == 0)
+ goto skip_ports;
+ ports = kcalloc(dparams->port_cnt, sizeof(struct rvu_port), GFP_KERNEL);
+ if (ports == NULL) {
+ dev_err(dev, "Not enough memory.\n");
+ res = -ENOMEM;
+ goto err_ports;
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ if (sscanf(dparams->ports[i], PCI_SCAN_FMT, &dom, &bus, &slot,
+ &fn) != 4) {
+ dev_err(dev, "Invalid port: %s.\n", dparams->ports[i]);
+ res = -EINVAL;
+ goto err_ports;
+ }
+ ports[i].pdev =
+ pci_get_domain_bus_and_slot(dom, bus,
+ PCI_DEVFN(slot, fn));
+ if (ports[i].pdev == NULL) {
+ dev_err(dev, "Unknown port: %s.\n", dparams->ports[i]);
+ res = -ENODEV;
+ goto err_ports;
+ }
+ device = ports[i].pdev->device;
+ if (ports[i].pdev->vendor != PCI_VENDOR_ID_CAVIUM ||
+ (device != PCI_DEVID_OCTEONTX2_RVU_PF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_PF &&
+ device != PCI_DEVID_OCTEONTX2_RVU_AFVF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_AFVF &&
+ device != PCI_DEVID_OCTEONTX2_RVU_VF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_VF)) {
+ dev_err(dev, "Unsupported port: %s.\n",
+ dparams->ports[i]);
+ res = -EINVAL;
+ goto err_ports;
+ }
+ list_for_each_entry(cur, &lsfs->ports, list) {
+ if (cur->pdev != ports[i].pdev)
+ continue;
+ dev_err(dev,
+ "Port %s already assigned to domain %s.\n",
+ dparams->ports[i], cur->domain->name);
+ res = -EBUSY;
+ goto err_ports;
+ }
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ ports[i].domain = domain;
+ list_add(&ports[i].list, &lsfs->ports);
+ }
+ domain->ports = ports;
+ domain->port_count = dparams->port_cnt;
+skip_ports:
+ mutex_unlock(&domain_sysfs_lock);
+ /* Check domain spec against limits for the parent RVU. */
+ mutex_lock(&lsfs->rdev->lock);
+ old_sso = lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val;
+ old_ssow = lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val;
+ old_npa = lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val;
+ old_cpt = lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val;
+ old_tim = lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val;
+#define CHECK_LIMITS(_ls, _val, _n, _idx) do { \
+ if (quotas_get_sum(_ls) + _val - _ls->a[_idx].val > _ls->max_sum) { \
+ dev_err(dev, \
+ "Not enough "_n" LFs, currently used: %lld/%lld\n", \
+ quotas_get_sum(_ls), _ls->max_sum); \
+ res = -ENODEV; \
+ goto err_limits; \
+ } \
+} while (0)
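+ /* Each check verifies that replacing this VF's current allocation
+ * with the requested one keeps the block's total within max_sum.
+ */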
+ CHECK_LIMITS(lsfs->rdev->vf_limits.sso, dparams->sso, "SSO",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.ssow, dparams->ssow, "SSOW",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.npa, dparams->npa, "NPA",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.cpt, dparams->cpt, "CPT",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.tim, dparams->tim, "TIM",
+ domain->rvf->vf_id);
+ if (dparams->dpi > lsfs->dpi_info.vfs_free) {
+ dev_err(dev,
+ "Not enough DPI VFS, currently used:%d/%d\n",
+ lsfs->dpi_info.num_vfs -
+ lsfs->dpi_info.vfs_free,
+ lsfs->dpi_info.num_vfs);
+ res = -ENODEV;
+ goto err_limits;
+ }
+
+ /* Now that checks are done, update the limits */
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = dparams->sso;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = dparams->ssow;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = dparams->npa;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = dparams->cpt;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = dparams->tim;
+ lsfs->dpi_info.vfs_free -= dparams->dpi;
+ mutex_unlock(&lsfs->rdev->lock);
+
+ /* Set it up according to user spec */
+ domain->kobj = kobject_create_and_add(dparams->name, lsfs->parent);
+ if (domain->kobj == NULL) {
+ dev_err(dev, "Failed to create domain directory.\n");
+ res = -ENOMEM;
+ goto err_kobject_create;
+ }
+ res = sysfs_create_link(domain->kobj, &domain->rvf->pdev->dev.kobj,
+ pci_name(domain->rvf->pdev));
+ if (res < 0) {
+ dev_err(dev, "Failed to create dev links for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_dev_symlink;
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ res = sysfs_create_link(domain->kobj, &ports[i].pdev->dev.kobj,
+ pci_name(ports[i].pdev));
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create dev links for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_port_symlink;
+ }
+ }
+ /* Create symlinks for dpi vfs in domain */
+ for (i = 0; i < dparams->dpi; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+ int vf_idx;
+
+ for (vf_idx = 0; vf_idx < lsfs->dpi_info.num_vfs;
+ vf_idx++) {
+ /* Find available dpi vfs and create symlinks */
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[vf_idx];
+ if (dpivf_ptr->in_use)
+ continue;
+ else
+ break;
+ }
+ res = sysfs_create_link(domain->kobj,
+ &dpivf_ptr->pdev->dev.kobj,
+ pci_name(dpivf_ptr->pdev));
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create DPI dev links for domain %s\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dpi_symlink;
+ }
+ dpivf_ptr->domain_kobj = domain->kobj;
+ dpivf_ptr->in_use = true;
+ }
+
+ domain->domain_in_use.attr.mode = 0444;
+ domain->domain_in_use.attr.name = "domain_in_use";
+ domain->domain_in_use.show = domain_in_use_show;
+ res = sysfs_create_file(domain->kobj, &domain->domain_in_use.attr);
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create domain_in_use file for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_in_use;
+ }
+
+ domain->domain_id.attr.mode = 0444;
+ domain->domain_id.attr.name = "domain_id";
+ domain->domain_id.show = domain_id_show;
+ res = sysfs_create_file(domain->kobj, &domain->domain_id.attr);
+ if (res < 0) {
+ dev_err(dev, "Failed to create domain_id file for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_id;
+ }
+
+ return res;
+
+err_dom_id:
+ domain->domain_id.attr.mode = 0;
+ sysfs_remove_file(domain->kobj, &domain->domain_in_use.attr);
+err_dom_in_use:
+ domain->domain_in_use.attr.mode = 0;
+err_dpi_symlink:
+ for (i = 0; i < lsfs->dpi_info.num_vfs; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[i];
+ /* Identify the devices belonging to this domain */
+ if (dpivf_ptr->in_use &&
+ dpivf_ptr->domain_kobj == domain->kobj) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(dpivf_ptr->pdev));
+ dpivf_ptr->in_use = false;
+ dpivf_ptr->domain_kobj = NULL;
+ }
+ }
+err_dom_port_symlink:
+ for (i = 0; i < dparams->port_cnt; i++)
+ sysfs_remove_link(domain->kobj, pci_name(ports[i].pdev));
+ sysfs_remove_link(domain->kobj, pci_name(domain->rvf->pdev));
+err_dom_dev_symlink:
+ kobject_del(domain->kobj);
+err_kobject_create:
+ mutex_lock(&lsfs->rdev->lock);
+err_limits:
+ /* Restore the previous limits */
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = old_sso;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = old_ssow;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = old_npa;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = old_cpt;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = old_tim;
+ lsfs->dpi_info.vfs_free += dparams->dpi;
+ mutex_unlock(&lsfs->rdev->lock);
+ mutex_lock(&domain_sysfs_lock);
+err_ports:
+ /* Free all allocated ports */
+ for (i = 0; i < dparams->port_cnt; i++) {
+ if (ports[i].pdev == NULL)
+ break;
+ if (ports[i].domain != NULL)
+ list_del(&ports[i].list);
+ pci_dev_put(ports[i].pdev);
+ }
+ kfree(ports);
+ domain->ports = NULL;
+ domain->port_count = 0;
+ domain->in_use = false;
+ domain->name[0] = '\0';
+err_dom:
+ mutex_unlock(&domain_sysfs_lock);
+ return res;
+}
+
+static ssize_t
+destroy_domain_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_sysfs *lsfs =
+ container_of(attr, struct domain_sysfs, destroy_domain);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ struct domain *domain = NULL;
+ char name[DOMAIN_NAME_LEN], *name_ptr;
+ int i, res;
+
+ strncpy(name, buf, DOMAIN_NAME_LEN - 1);
+ name[DOMAIN_NAME_LEN - 1] = '\0';
+ name_ptr = strim(name);
+ if (strlen(name_ptr) == 0) {
+ dev_err(dev, "Empty domain name.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&domain_sysfs_lock);
+ /* Find a free domain device */
+ for (i = 0; i < lsfs->domains_len; i++) {
+ if (!strncmp(lsfs->domains[i].name, name_ptr,
+ DOMAIN_NAME_LEN)) {
+ domain = &lsfs->domains[i];
+ break;
+ }
+ }
+ if (domain == NULL) {
+ dev_err(dev, "Domain '%s' doesn't exist.\n", name_ptr);
+ res = -EINVAL;
+ goto err_dom;
+ }
+ mutex_unlock(&domain_sysfs_lock);
+
+ res = do_destroy_domain(lsfs, domain);
+ if (res == 0)
+ res = count;
+ return res;
+err_dom:
+ mutex_unlock(&domain_sysfs_lock);
+ return res;
+}
+
+static ssize_t
+create_domain_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_params *dparams = NULL;
+ struct domain_sysfs *lsfs =
+ container_of(attr, struct domain_sysfs, create_domain);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ int res = 0;
+ char *start;
+ char *end;
+ char *ptr = NULL;
+ const char *name;
+ char *errmsg = "Invalid domain specification format.";
+
+ if (strlen(buf) == 0) {
+ dev_err(dev, "Empty domain spec.\n");
+ return -EINVAL;
+ }
+
+ dparams = kzalloc(sizeof(*dparams), GFP_KERNEL);
+ if (dparams == NULL) {
+ errmsg = "Not enough memory";
+ res = -ENOMEM;
+ goto error;
+ }
+
+ end = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (end == NULL) {
+ errmsg = "Not enough memory";
+ res = -ENOMEM;
+ goto error;
+ }
+
+ ptr = end;
+ memcpy(end, buf, count);
+
+ name = strsep(&end, ";");
+ if (end == NULL) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ dparams->name = name;
+
+ for (;;) {
+ start = strsep(&end, ";");
+ if (start == NULL)
+ break;
+ start = strim(start);
+ if (!*start)
+ continue;
+
+ if (!strncmp(strim(start), "port", sizeof("port") - 1)) {
+ strsep(&start, ":");
+ if (start == NULL ||
+ dparams->port_cnt >= RM_MAX_PORTS) {
+ res = -EINVAL;
+ goto error;
+ }
+ dparams->ports[dparams->port_cnt++] = strim(start);
+ }
+ #define DP(_name, _1, _fmt) \
+ else if (!strncmp(strim(start), #_name, \
+ sizeof(#_name) - 1)) { \
+ strsep(&start, ":"); \
+ if (start == NULL) { \
+ res = -EINVAL; \
+ goto error; \
+ } \
+ start = strim(start); \
+ if (sscanf(start, _fmt, &dparams->_name) != 1) { \
+ res = -EINVAL; \
+ goto error; \
+ } \
+ continue; \
+ }
+ DOM_PARAM_SPEC
+ #undef DP
+ else {
+ res = -EINVAL;
+ goto error;
+ }
+ }
+ res = do_create_domain(lsfs, dparams);
+ if (res < 0) {
+ errmsg = "Failed to create application domain.";
+ goto error;
+ } else
+ res = count;
+error:
+ if (res < 0)
+ dev_err(dev, "%s\n", errmsg);
+ kfree(ptr);
+ kfree(dparams);
+ return res;
+}
+
+static int dpivf_sysfs_create(struct domain_sysfs *lsfs)
+{
+ struct dpi_info *dpi_info = &lsfs->dpi_info;
+ struct dpi_vf *dpivf_ptr = NULL;
+ struct pci_dev *pdev = lsfs->rdev->pdev;
+ struct pci_dev *vdev = NULL;
+ uint8_t vf_idx = 0;
+
+ dpi_info->dpi_vf = kcalloc(DPI_MAX_VFS,
+ sizeof(struct dpi_vf), GFP_KERNEL);
+ if (dpi_info->dpi_vf == NULL)
+ return -ENOMEM;
+
+ /* Get the available DPI VFs */
+ while ((vdev = pci_get_device(pdev->vendor,
+ PCI_DEVID_OCTEONTX2_DPI_VF, vdev))) {
+ if (!vdev->is_virtfn)
+ continue;
+ if (vf_idx >= DPI_MAX_VFS) {
+ pci_dev_put(vdev);
+ break;
+ }
+ dpivf_ptr = &dpi_info->dpi_vf[vf_idx];
+ /* Take a reference: pci_get_device() drops the one it
+ * returned on the next loop iteration.
+ */
+ dpivf_ptr->pdev = pci_dev_get(vdev);
+ dpivf_ptr->vf_id = vf_idx;
+ dpivf_ptr->in_use = false;
+ vf_idx++;
+ }
+ dpi_info->num_vfs = vf_idx;
+ dpi_info->vfs_free = vf_idx;
+ return 0;
+}
+
+static void dpivf_sysfs_destroy(struct domain_sysfs *lsfs)
+{
+ struct dpi_info *dpi_info = &lsfs->dpi_info;
+ struct dpi_vf *dpivf_ptr = NULL;
+ uint8_t vf_idx = 0;
+
+ if (dpi_info->num_vfs == 0)
+ goto free_mem;
+
+ for (vf_idx = 0; vf_idx < dpi_info->num_vfs; vf_idx++) {
+ dpivf_ptr = &dpi_info->dpi_vf[vf_idx];
+ pci_dev_put(dpivf_ptr->pdev);
+ dpivf_ptr->pdev = NULL;
+ }
+ dpi_info->num_vfs = 0;
+
+free_mem:
+ kfree(dpi_info->dpi_vf);
+ dpi_info->dpi_vf = NULL;
+}
+
+
+static void enable_pmccntr_el0(void *data)
+{
+ u64 val;
+
+ /* Disable the cycle counter overflow interrupt (PMINTENCLR is
+ * write-1-to-clear; clearing bits via PMINTENSET has no effect).
+ */
+ asm volatile("msr pmintenclr_el1, %0" : : "r" (BIT_ULL(31)));
+ /* Enable the cycle counter (write-1-to-set) */
+ asm volatile("msr pmcntenset_el0, %0" : : "r" (BIT_ULL(31)));
+ /* Enable user-mode access to the cycle counter */
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ val |= BIT(2) | BIT(0);
+ asm volatile("msr pmuserenr_el0, %0" : : "r" (val));
+ /* Start the cycle counter */
+ asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+ val |= BIT(0);
+ isb();
+ asm volatile("msr pmcr_el0, %0" : : "r" (val));
+ /* Count cycles in EL2 as well (NSH filter bit) */
+ asm volatile("mrs %0, pmccfiltr_el0" : "=r" (val));
+ val |= BIT(27);
+ asm volatile("msr pmccfiltr_el0, %0" : : "r" (val));
+}
+
+static void disable_pmccntr_el0(void *data)
+{
+ u64 val;
+
+ /* Disable the cycle counter (PMCNTENCLR is write-1-to-clear) */
+ asm volatile("msr pmcntenclr_el0, %0" : : "r" (BIT_ULL(31)));
+ /* Disable user-mode access to the counters */
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ val &= ~(BIT_ULL(2) | BIT_ULL(0));
+ asm volatile("msr pmuserenr_el0, %0" : : "r" (val));
+}
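+
+/* Once enabled through the knob below, user space can read the cycle
+ * counter directly; an AArch64-only sketch:
+ *
+ *   static inline uint64_t read_cycles(void)
+ *   {
+ *           uint64_t c;
+ *
+ *           asm volatile("mrs %0, pmccntr_el0" : "=r" (c));
+ *           return c;
+ *   }
+ */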
+
+static ssize_t
+enadis_pmccntr_el0_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_sysfs *lsfs = container_of(attr, struct domain_sysfs,
+ pmccntr_el0);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ char tmp_buf[64];
+ long enable = 0;
+ char *tmp_ptr;
+ ssize_t used;
+
+ strlcpy(tmp_buf, buf, 64);
+ used = strlen(tmp_buf);
+ tmp_ptr = strim(tmp_buf);
+ if (kstrtol(tmp_ptr, 0, &enable)) {
+ dev_err(dev, "Invalid value, expected 1/0\n");
+ return -EINVAL;
+ }
+
+ if (enable)
+ on_each_cpu(enable_pmccntr_el0, NULL, 1);
+ else
+ on_each_cpu(disable_pmccntr_el0, NULL, 1);
+
+ return count;
+}
+
+static void check_pmccntr_el0(void *data)
+{
+ int *out = data;
+ u64 val;
+
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ *out = *out & !!(val & (BIT(2) | BIT(0)));
+}
+
+static ssize_t
+enadis_pmccntr_el0_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int out = 1;
+
+ on_each_cpu(check_pmccntr_el0, &out, 1);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", out);
+}
+
+int domain_sysfs_create(struct rm_dev *rm)
+{
+ struct domain_sysfs *lsfs;
+ int res = 0, i;
+
+ if (rm == NULL || rm->num_vfs == 0)
+ return -EINVAL;
+
+ lsfs = kzalloc(sizeof(*lsfs), GFP_KERNEL);
+ if (lsfs == NULL) {
+ res = -ENOMEM;
+ goto err_lsfs_alloc;
+ }
+
+ INIT_LIST_HEAD(&lsfs->ports);
+ lsfs->rdev = rm;
+ lsfs->domains_len = rm->num_vfs;
+ lsfs->domains =
+ kcalloc(lsfs->domains_len, sizeof(struct domain), GFP_KERNEL);
+ if (lsfs->domains == NULL) {
+ res = -ENOMEM;
+ goto err_domains_alloc;
+ }
+ for (i = 0; i < lsfs->domains_len; i++)
+ lsfs->domains[i].rvf = &rm->vf_info[i];
+
+ lsfs->create_domain.attr.name = "create_domain";
+ lsfs->create_domain.attr.mode = 0200;
+ lsfs->create_domain.store = create_domain_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj, &lsfs->create_domain.attr);
+ if (res)
+ goto err_create_domain;
+
+ lsfs->destroy_domain.attr.name = "destroy_domain";
+ lsfs->destroy_domain.attr.mode = 0200;
+ lsfs->destroy_domain.store = destroy_domain_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj,
+ &lsfs->destroy_domain.attr);
+ if (res)
+ goto err_destroy_domain;
+
+ lsfs->pmccntr_el0.attr.name = "pmccntr_el0";
+ lsfs->pmccntr_el0.attr.mode = 0644;
+ lsfs->pmccntr_el0.show = enadis_pmccntr_el0_show;
+ lsfs->pmccntr_el0.store = enadis_pmccntr_el0_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj, &lsfs->pmccntr_el0.attr);
+ if (res)
+ goto err_pmccntr_el0;
+
+ lsfs->parent = &rm->pdev->dev.kobj;
+
+ res = dpivf_sysfs_create(lsfs);
+ if (res)
+ goto err_dpivf_sysfs_create;
+
+ mutex_lock(&domain_sysfs_lock);
+ list_add_tail(&lsfs->list, &domain_sysfs_list);
+ mutex_unlock(&domain_sysfs_lock);
+
+ return 0;
+
+err_dpivf_sysfs_create:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->pmccntr_el0.attr);
+err_pmccntr_el0:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->destroy_domain.attr);
+err_destroy_domain:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->create_domain.attr);
+err_create_domain:
+ kfree(lsfs->domains);
+err_domains_alloc:
+ kfree(lsfs);
+err_lsfs_alloc:
+ return res;
+}
+
+void domain_sysfs_destroy(struct rm_dev *rm)
+{
+ struct list_head *pos, *n;
+ struct domain_sysfs *lsfs = NULL;
+
+ if (rm == NULL)
+ return;
+
+ mutex_lock(&domain_sysfs_lock);
+ list_for_each_safe(pos, n, &domain_sysfs_list) {
+ lsfs = container_of(pos, struct domain_sysfs, list);
+ if (lsfs->rdev == rm) {
+ list_del(pos);
+ break;
+ }
+ lsfs = NULL;
+ }
+ mutex_unlock(&domain_sysfs_lock);
+
+ if (lsfs == NULL)
+ return;
+
+ dpivf_sysfs_destroy(lsfs);
+
+ if (lsfs->pmccntr_el0.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->pmccntr_el0.attr);
+ if (lsfs->destroy_domain.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->destroy_domain.attr);
+ if (lsfs->create_domain.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->create_domain.attr);
+
+ kfree(lsfs->domains);
+ kfree(lsfs);
+}
diff --git a/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h
new file mode 100644
index 000000000000..d28d5b8e8f38
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DOMAIN_SYSFS_H_
+#define DOMAIN_SYSFS_H_
+
+#include "otx2_rm.h"
+
+int domain_sysfs_create(struct rm_dev *rm);
+void domain_sysfs_destroy(struct rm_dev *rm);
+
+#endif /* DOMAIN_SYSFS_H_ */
diff --git a/drivers/soc/marvell/octeontx2-rm/otx2_rm.c b/drivers/soc/marvell/octeontx2-rm/otx2_rm.c
new file mode 100644
index 000000000000..bf0e023abdda
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/otx2_rm.c
@@ -0,0 +1,1581 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+#include "otx2_rm.h"
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+#include "domain_sysfs.h"
+#endif
+
+#define DRV_NAME "octeontx2-rm"
+#define DRV_VERSION "1.0"
+
+#define PCI_DEVID_OCTEONTX2_SSO_PF 0xA0F9
+#define PCI_DEVID_OCTEONTX2_SSO_VF 0xA0FA
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_CFG_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+/* Supported devices */
+static const struct pci_device_id rvu_rm_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SSO_PF)},
+ {0} /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX2 SSO/SSOW/TIM/NPA PF Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_rm_id_table);
+
+/* All PF devices found are stored here */
+static spinlock_t rm_lst_lock;
+LIST_HEAD(rm_dev_lst_head);
+
+static void
+rm_write64(struct rm_dev *rvu, u64 b, u64 s, u64 o, u64 v)
+{
+ writeq_relaxed(v, rvu->bar2 + ((b << 20) | (s << 12) | o));
+}
+
+static u64 rm_read64(struct rm_dev *rvu, u64 b, u64 s, u64 o)
+{
+ return readq_relaxed(rvu->bar2 + ((b << 20) | (s << 12) | o));
+}
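+
+/* BAR2 offset layout used by rm_read64()/rm_write64(), per the shifts
+ * above: the block address (b) starts at bit 20, bits [19:12] select the
+ * LF slot (s), and bits [11:0] carry the register offset (o).
+ */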
+
+static void enable_af_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear interrupt if any */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ /* Now Enable AF-PF interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S, 0x1ULL);
+}
+
+static void disable_af_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear interrupt if any */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ /* Now Disable AF-PF interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C, 0x1ULL);
+}
+
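+/* Copy the payload of 'req' into a freshly allocated message on 'mbox' and
+ * synchronously wait for the response; 'mstr' names the peer for logging.
+ */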
+static int
+forward_to_mbox(struct rm_dev *rm, struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *req, int size, const char *mstr)
+{
+ struct mbox_msghdr *msg;
+ int res = 0;
+
+ msg = otx2_mbox_alloc_msg(mbox, devid, size);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
+ (uint8_t *)req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+
+ otx2_mbox_msg_send(mbox, devid);
+ res = otx2_mbox_wait_for_rsp(mbox, devid);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU %s MBOX timeout.\n", mstr);
+ goto err;
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU %s MBOX error: %d.\n", mstr, res);
+ res = -EFAULT;
+ goto err;
+ }
+
+ return 0;
+err:
+ return res;
+}
+
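+/* Handle an AF => PF notification by forwarding it to the addressed VF
+ * over the PF => VF "up" channel.
+ */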
+static int
+handle_af_req(struct rm_dev *rm, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ /* We expect a request here */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&rm->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+ /* If handling notifications in the PF is required, add a switch-case here. */
+ return forward_to_mbox(rm, &rm->pfvf_mbox_up, vf->vf_id, req, size,
+ "VF");
+}
+
+static void rm_afpf_mbox_handler_up(struct work_struct *work)
+{
+ struct rm_dev *rm = container_of(work, struct rm_dev, mbox_wrk_up);
+ struct otx2_mbox *mbox = &rm->afpf_mbox_up;
+ struct otx2_mbox_dev *mdev = mbox->dev;
+ struct rvu_vf *vf;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int offset, id, err;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ if ((msg->pcifunc >> RVU_PFVF_PF_SHIFT) != rm->pf ||
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) == 0 ||
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) > rm->num_vfs) {
+ err = -EINVAL;
+ } else {
+ /* The function field encodes the VF index + 1 */
+ vf = &rm->vf_info[(msg->pcifunc &
+ RVU_PFVF_FUNC_MASK) - 1];
+ err = handle_af_req(rm, vf, msg,
+ msg->next_msgoff - offset);
+ }
+ if (err)
+ otx2_reply_invalid_msg(mbox, 0, msg->pcifunc, msg->id);
+ offset = msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
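+/* Bottom half for AF => PF responses: replies destined for a VF are copied
+ * onto that VF's mailbox, while PF-directed messages (READY, free resource
+ * counts) are consumed here.
+ */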
+static void rm_afpf_mbox_handler(struct work_struct *work)
+{
+ struct rm_dev *rm;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg, *fwd;
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ struct free_rsrcs_rsp *rsp;
+ int offset, i, vf_id, size;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ rm = container_of(work, struct rm_dev, mbox_wrk);
+ af_mbx = &rm->afpf_mbox;
+ vf_mbx = &rm->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(af_mbx->dev->mbase + af_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(af_mbx->dev->mbase +
+ af_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ vf_id = (msg->pcifunc & RVU_PFVF_FUNC_MASK);
+ if (vf_id > 0) {
+ if (vf_id > rm->num_vfs) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg to unknown VF: %d >= %d\n",
+ vf_id, rm->num_vfs);
+ goto end;
+ }
+ vf = &rm->vf_info[vf_id - 1];
+ /* Ignore stale responses and VFs in FLR. */
+ if (!vf->in_use || vf->got_flr)
+ goto end;
+ fwd = otx2_mbox_alloc_msg(vf_mbx, vf_id - 1, size);
+ if (!fwd) {
+ dev_err(&rm->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+ } else {
+ if (msg->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with version %04x != %04x\n",
+ msg->ver, OTX2_MBOX_VERSION);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ rm->pf = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
+ RVU_PFVF_PF_MASK;
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ rsp = (struct free_rsrcs_rsp *)msg;
+ memcpy(&rm->limits, msg, sizeof(*rsp));
+ break;
+ default:
+ dev_err(&rm->pdev->dev,
+ "Unsupported msg %d received.\n",
+ msg->id);
+ break;
+ }
+ }
+end:
+ offset = msg->next_msgoff;
+ af_mbx->dev->msgs_acked++;
+ }
+ otx2_mbox_reset(af_mbx, 0);
+}
+
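+/* Answer a VF's FREE_RSRC_CNT request from the locally cached per-VF
+ * limits instead of forwarding it to AF.
+ */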
+static int
+reply_free_rsrc_cnt(struct rm_dev *rm, struct rvu_vf *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct free_rsrcs_rsp *rsp;
+
+ rsp = (struct free_rsrcs_rsp *)otx2_mbox_alloc_msg(&rm->pfvf_mbox,
+ vf->vf_id,
+ sizeof(*rsp));
+ if (rsp == NULL)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_FREE_RSRC_CNT;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ mutex_lock(&rm->lock);
+ rsp->sso = rm->vf_limits.sso->a[vf->vf_id].val;
+ rsp->ssow = rm->vf_limits.ssow->a[vf->vf_id].val;
+ rsp->npa = rm->vf_limits.npa->a[vf->vf_id].val;
+ rsp->cpt = rm->vf_limits.cpt->a[vf->vf_id].val;
+ rsp->tim = rm->vf_limits.tim->a[vf->vf_id].val;
+ rsp->nix = 0;
+ mutex_unlock(&rm->lock);
+ return 0;
+}
+
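+/* Validate an ATTACH_RESOURCES request against this VF's limits before
+ * forwarding it to AF; NIX LFs are never granted through this driver.
+ */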
+static int
+check_attach_rsrcs_req(struct rm_dev *rm, struct rvu_vf *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct rsrc_attach *rsrc_req;
+
+ rsrc_req = (struct rsrc_attach *)req;
+ mutex_lock(&rm->lock);
+ if (rsrc_req->sso > rm->vf_limits.sso->a[vf->vf_id].val ||
+ rsrc_req->ssow > rm->vf_limits.ssow->a[vf->vf_id].val ||
+ rsrc_req->npalf > rm->vf_limits.npa->a[vf->vf_id].val ||
+ rsrc_req->timlfs > rm->vf_limits.tim->a[vf->vf_id].val ||
+ rsrc_req->cptlfs > rm->vf_limits.cpt->a[vf->vf_id].val ||
+ rsrc_req->nixlf > 0) {
+ dev_err(&rm->pdev->dev,
+ "Invalid ATTACH_RESOURCES request from %s\n",
+ dev_name(&vf->pdev->dev));
+ mutex_unlock(&rm->lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&rm->lock);
+ return forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+}
+
+static int
+handle_vf_req(struct rm_dev *rm, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ int err = 0;
+
+ /* Check if the message is valid; if not, send an invalid-msg reply */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+ switch (req->id) {
+ case MBOX_MSG_READY:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ vf->in_use = true;
+ err = forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ err = reply_free_rsrc_cnt(rm, vf, req, size);
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ err = check_attach_rsrcs_req(rm, vf, req, size);
+ break;
+ default:
+ err = forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+ break;
+ }
+
+ return err;
+}
+
+static int send_flr_msg(struct otx2_mbox *mbox, int dev_id, int pcifunc)
+{
+ struct msg_req *req;
+
+ req = (struct msg_req *)
+ otx2_mbox_alloc_msg(mbox, dev_id, sizeof(*req));
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->hdr.pcifunc = pcifunc;
+ req->hdr.id = MBOX_MSG_VF_FLR;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+
+ otx2_mbox_msg_send(mbox, 0);
+
+ return 0;
+}
+
+static void rm_send_flr_msg(struct rm_dev *rm, struct rvu_vf *vf)
+{
+ int res, pcifunc;
+
+ pcifunc = (vf->rm->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ if (send_flr_msg(&rm->afpf_mbox, 0, pcifunc) != 0) {
+ dev_err(&rm->pdev->dev, "Sending FLR to AF failed\n");
+ return;
+ }
+
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU MBOX error: %d.\n", res);
+ }
+}
+
+static void rm_send_flr_to_dpi(struct rm_dev *rm)
+{
+ /* TODO: DPI VFs need to be handled */
+}
+
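+/* Per-VF FLR work: notify AF (and, in future, DPI), then quiesce the AF
+ * mailbox so no stale responses arrive while this VF's mailbox is reset,
+ * and finally clear the VF's FLR trigger-pending bit.
+ */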
+static void rm_pfvf_flr_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, pfvf_flr_work);
+ struct rm_dev *rm = vf->rm;
+ struct otx2_mbox *mbox = &rm->pfvf_mbox;
+
+ rm_send_flr_to_dpi(rm);
+ rm_send_flr_msg(rm, vf);
+
+ /* Disable interrupts from AF and wait for any pending
+ * responses to be handled for this VF and then reset the
+ * mailbox
+ */
+ disable_af_mbox_int(rm->pdev);
+ flush_workqueue(rm->afpf_mbox_wq);
+ otx2_mbox_reset(mbox, vf->vf_id);
+ vf->in_use = false;
+ vf->got_flr = false;
+ enable_af_mbox_int(rm->pdev);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(vf->vf_id / 64),
+ BIT_ULL(vf->intr_idx));
+}
+
+static void rm_pfvf_mbox_handler_up(struct work_struct *work)
+{
+ struct rm_dev *rm;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg, *fwd;
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ int offset, i, size;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ vf = container_of(work, struct rvu_vf, mbox_wrk_up);
+ rm = vf->rm;
+ af_mbx = &rm->afpf_mbox;
+ vf_mbx = &rm->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(vf_mbx->dev[vf->vf_id].mbase +
+ vf_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(vf_mbx->dev[vf->vf_id].mbase +
+ vf_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&rm->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ /* Override the source with this VF's actual PF/VF identity */
+ msg->pcifunc = (rm->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ fwd = otx2_mbox_alloc_msg(af_mbx, 0, size);
+ if (!fwd) {
+ dev_err(&rm->pdev->dev,
+ "UP Forwarding from VF%d to AF failed.\n",
+ vf->vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+end:
+ offset = msg->next_msgoff;
+ vf_mbx->dev[vf->vf_id].msgs_acked++;
+ }
+ otx2_mbox_reset(vf_mbx, vf->vf_id);
+}
+
+static void rm_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, mbox_wrk);
+ struct rm_dev *rm = vf->rm;
+ struct otx2_mbox *mbox = &rm->pfvf_mbox;
+ struct otx2_mbox_dev *mdev = &mbox->dev[vf->vf_id];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int offset, id, err;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
+ msg->pcifunc = ((u16)rm->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+ err = handle_vf_req(rm, vf, msg, msg->next_msgoff - offset);
+ if (err)
+ otx2_reply_invalid_msg(mbox, vf->vf_id, msg->pcifunc,
+ msg->id);
+ offset = msg->next_msgoff;
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+static irqreturn_t rm_af_pf_mbox_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ struct mbox_hdr *hdr;
+ struct otx2_mbox *mbox;
+ struct otx2_mbox_dev *mdev;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ mbox = &rm->afpf_mbox;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => AF channel response */
+ if (hdr->num_msgs)
+ queue_work(rm->afpf_mbox_wq, &rm->mbox_wrk);
+
+ mbox = &rm->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle AF => PF request */
+ if (hdr->num_msgs)
+ queue_work(rm->afpf_mbox_wq, &rm->mbox_wrk_up);
+
+ /* Clear and ack the interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ return IRQ_HANDLED;
+}
+
+static void __handle_vf_flr(struct rm_dev *rm, struct rvu_vf *vf_ptr)
+{
+ if (vf_ptr->in_use) {
+ /* Using the same MBOX workqueue here, so that we can
+ * synchronize with other VF->PF messages being forwarded to
+ * AF
+ */
+ vf_ptr->got_flr = true;
+ queue_work(rm->pfvf_mbox_wq, &vf_ptr->pfvf_flr_work);
+ } else {
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(vf_ptr->vf_id / 64),
+ BIT_ULL(vf_ptr->intr_idx));
+ }
+}
+
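+/* Up to 128 VFs are covered by two 64-bit FLR interrupt registers; scan
+ * both, ack each pending bit and hand the VF over to the FLR work item.
+ */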
+static irqreturn_t rm_pf_vf_flr_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ u64 intr;
+ struct rvu_vf *vf_ptr;
+ int vf, i;
+
+ /* Check which VF FLR has been raised and process accordingly */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(i));
+
+ for (vf = i * 64; vf < rm->num_vfs; vf++) {
+ vf_ptr = &rm->vf_info[vf];
+ if (intr & (1ULL << vf_ptr->intr_idx)) {
+ /* Clear the interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(i),
+ BIT_ULL(vf_ptr->intr_idx));
+ __handle_vf_flr(rm, vf_ptr);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rm_pf_vf_mbox_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ struct mbox_hdr *hdr;
+ struct otx2_mbox *mbox;
+ struct otx2_mbox_dev *mdev;
+ u64 intr;
+ struct rvu_vf *vf;
+ int i, vfi;
+
+ /* Check which VF has raised an interrupt and schedule corresponding
+ * workq to process the MBOX
+ */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(i));
+
+ for (vfi = i * 64; vfi < rm->num_vfs; vfi++) {
+ vf = &rm->vf_info[vfi];
+ if ((intr & (1ULL << vf->intr_idx)) == 0)
+ continue;
+ mbox = &rm->pfvf_mbox;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle VF => PF channel request */
+ if (hdr->num_msgs)
+ queue_work(rm->pfvf_mbox_wq, &vf->mbox_wrk);
+
+ mbox = &rm->pfvf_mbox_up;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => VF channel response */
+ if (hdr->num_msgs)
+ queue_work(rm->pfvf_mbox_wq, &vf->mbox_wrk_up);
+ /* Clear the interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rm_register_flr_irq(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err, vec, i;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Register for VF FLR interrupts
+ * There are 2 vectors starting at index 0x0
+ */
+ for (vec = RVU_PF_INT_VEC_VFFLR0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFFLR1; i++) {
+ sprintf(&rm->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_FLR_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ rm_pf_vf_flr_intr, 0,
+ &rm->irq_names[(vec + i) * NAME_SIZE], rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF FLR intr %d\n",
+ vec);
+ goto reg_fail;
+ }
+ rm->irq_allocated[vec + i] = true;
+ }
+
+ return 0;
+
+reg_fail:
+ return err;
+}
+
+static void rm_free_flr_irq(struct pci_dev *pdev)
+{
+ /* FLR IRQs are released in rm_free_irqs(); nothing to do here yet. */
+}
+
+static int rm_alloc_irqs(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Get number of MSIX vector count and allocate vectors first */
+ rm->msix_count = pci_msix_vec_count(pdev);
+
+ err = pci_alloc_irq_vectors(pdev, rm->msix_count, rm->msix_count,
+ PCI_IRQ_MSIX);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed %d\n", err);
+ return err;
+ }
+
+ rm->irq_names = kmalloc_array(rm->msix_count, NAME_SIZE, GFP_KERNEL);
+ if (!rm->irq_names) {
+ err = -ENOMEM;
+ goto err_irq_names;
+ }
+
+ rm->irq_allocated = kcalloc(rm->msix_count, sizeof(bool), GFP_KERNEL);
+ if (!rm->irq_allocated) {
+ err = -ENOMEM;
+ goto err_irq_allocated;
+ }
+
+ return 0;
+
+err_irq_allocated:
+ kfree(rm->irq_names);
+ rm->irq_names = NULL;
+err_irq_names:
+ pci_free_irq_vectors(pdev);
+ rm->msix_count = 0;
+
+ return err;
+}
+
+static void rm_free_irqs(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int irq;
+
+ rm = pci_get_drvdata(pdev);
+ for (irq = 0; irq < rm->msix_count; irq++) {
+ if (rm->irq_allocated[irq])
+ free_irq(pci_irq_vector(rm->pdev, irq), rm);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ kfree(rm->irq_names);
+ kfree(rm->irq_allocated);
+}
+
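+/* Set up all mailbox plumbing: the AF-PF IRQ and both AF-PF mailbox
+ * directions, the two PF-VF mailbox IRQ vectors, and the ordered workqueue
+ * that serializes access to the AF mailbox.
+ */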
+static int rm_register_mbox_irq(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err, vec = RVU_PF_INT_VEC_VFPF_MBOX0, i;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Register PF-AF interrupt handler */
+ sprintf(&rm->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ "PF%02d_AF_MBOX_IRQ", pdev->devfn);
+ err = request_irq(pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ rm_af_pf_mbox_intr, 0,
+ &rm->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for AF_PF MSIX vector\n");
+ return err;
+ }
+ rm->irq_allocated[RVU_PF_INT_VEC_AFPF_MBOX] = true;
+
+ err = otx2_mbox_init(&rm->afpf_mbox, rm->af_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFAF, 1);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF/AF MBOX\n");
+ goto error;
+ }
+ err = otx2_mbox_init(&rm->afpf_mbox_up, rm->af_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFAF_UP, 1);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF/AF UP MBOX\n");
+ goto error;
+ }
+
+ /* Register for PF-VF mailbox interrupts
+ * There are 2 vectors starting at index 0x4
+ */
+ for (vec = RVU_PF_INT_VEC_VFPF_MBOX0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFPF_MBOX1; i++) {
+ sprintf(&rm->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_MBOX_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ rm_pf_vf_mbox_intr, 0,
+ &rm->irq_names[(vec + i) * NAME_SIZE], rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF Mbox intr %d\n",
+ vec + i);
+ goto error;
+ }
+ rm->irq_allocated[vec + i] = true;
+ }
+
+ rm->afpf_mbox_wq = alloc_workqueue(
+ "rm_pfaf_mailbox", WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+ if (!rm->afpf_mbox_wq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ INIT_WORK(&rm->mbox_wrk, rm_afpf_mbox_handler);
+ INIT_WORK(&rm->mbox_wrk_up, rm_afpf_mbox_handler_up);
+
+ return err;
+
+error:
+ if (rm->afpf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&rm->afpf_mbox_up);
+ if (rm->afpf_mbox.dev != NULL)
+ otx2_mbox_destroy(&rm->afpf_mbox);
+
+ return err;
+}
+
+static int rm_get_pcifunc(struct rm_dev *rm)
+{
+ struct msg_req *ready_req;
+ int res = 0;
+
+ ready_req = (struct msg_req *)
+ otx2_mbox_alloc_msg_rsp(&rm->afpf_mbox, 0, sizeof(*ready_req),
+ sizeof(struct ready_msg_rsp));
+ if (ready_req == NULL) {
+ dev_err(&rm->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ ready_req->hdr.id = MBOX_MSG_READY;
+ ready_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ otx2_mbox_msg_send(&rm->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev, "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+ return res;
+}
+
+static int rm_get_available_rsrcs(struct rm_dev *rm)
+{
+ struct mbox_msghdr *rsrc_req;
+ int res = 0;
+
+ rsrc_req = otx2_mbox_alloc_msg(&rm->afpf_mbox, 0, sizeof(*rsrc_req));
+ if (rsrc_req == NULL) {
+ dev_err(&rm->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ rsrc_req->id = MBOX_MSG_FREE_RSRC_CNT;
+ rsrc_req->sig = OTX2_MBOX_REQ_SIG;
+ rsrc_req->pcifunc = RVU_PFFUNC(rm->pf, 0);
+ otx2_mbox_msg_send(&rm->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+ return res;
+}
+
+static void rm_afpf_mbox_term(struct pci_dev *pdev)
+{
+ struct rm_dev *rm = pci_get_drvdata(pdev);
+
+ flush_workqueue(rm->afpf_mbox_wq);
+ destroy_workqueue(rm->afpf_mbox_wq);
+ otx2_mbox_destroy(&rm->afpf_mbox);
+ otx2_mbox_destroy(&rm->afpf_mbox_up);
+}
+
+static ssize_t vf_in_use_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rvu_vf *vf = container_of(attr, struct rvu_vf, in_use_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vf->in_use);
+}
+
+static void vf_sysfs_destroy(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ struct rvu_vf *vf;
+ int i;
+
+ rm = pci_get_drvdata(pdev);
+
+ quotas_free(rm->vf_limits.sso);
+ quotas_free(rm->vf_limits.ssow);
+ quotas_free(rm->vf_limits.npa);
+ quotas_free(rm->vf_limits.cpt);
+ quotas_free(rm->vf_limits.tim);
+ rm->vf_limits.sso = NULL;
+ rm->vf_limits.ssow = NULL;
+ rm->vf_limits.npa = NULL;
+ rm->vf_limits.cpt = NULL;
+ rm->vf_limits.tim = NULL;
+
+ for (i = 0; i < rm->num_vfs; i++) {
+ vf = &rm->vf_info[i];
+ if (vf->limits_kobj == NULL)
+ continue;
+ if (vf->in_use_attr.attr.mode != 0) {
+ sysfs_remove_file(&vf->pdev->dev.kobj,
+ &vf->in_use_attr.attr);
+ vf->in_use_attr.attr.mode = 0;
+ }
+ kobject_del(vf->limits_kobj);
+ vf->limits_kobj = NULL;
+ pci_dev_put(vf->pdev);
+ vf->pdev = NULL;
+ }
+}
+
+static int check_vf_in_use(void *arg, struct quota *quota, int new_val)
+{
+ struct rvu_vf *vf = arg;
+
+ if (vf->in_use) {
+ dev_err(quota->dev, "Can't modify limits, device is in use.\n");
+ return 1;
+ }
+ return 0;
+}
+
+static struct quota_ops vf_limit_ops = {
+ .pre_store = check_vf_in_use,
+};
+
+static int vf_sysfs_create(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ struct pci_dev *vdev;
+ struct rvu_vf *vf;
+ int err, i;
+
+ vdev = NULL;
+ vf = NULL;
+ rm = pci_get_drvdata(pdev);
+ err = 0;
+ i = 0;
+
+ /* Create limit structures for all resource types */
+ rm->vf_limits.sso = quotas_alloc(rm->num_vfs, rm->limits.sso,
+ rm->limits.sso, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.sso == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate sso limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.ssow = quotas_alloc(rm->num_vfs, rm->limits.ssow,
+ rm->limits.ssow, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.ssow == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate ssow limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ /* AF currently reports only 0-1 NPA LFs for the PF but there are more
+ * free LFs available. Until proper limits are implemented in AF, allow
+ * up to num_vfs LFs in total.
+ */
+ rm->vf_limits.npa = quotas_alloc(rm->num_vfs, 1, rm->num_vfs, 0,
+ &rm->lock, &vf_limit_ops);
+ if (rm->vf_limits.npa == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate npa limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.cpt = quotas_alloc(rm->num_vfs, rm->limits.cpt,
+ rm->limits.cpt, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.cpt == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate cpt limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.tim = quotas_alloc(rm->num_vfs, rm->limits.tim,
+ rm->limits.tim, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.tim == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate tim limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ /* loop through all the VFs and create sysfs entries for them */
+ while ((vdev = pci_get_device(pdev->vendor, PCI_DEVID_OCTEONTX2_SSO_VF,
+ vdev))) {
+ if (!vdev->is_virtfn || (vdev->physfn != pdev))
+ continue;
+ vf = &rm->vf_info[i];
+ vf->pdev = pci_dev_get(vdev);
+ vf->limits_kobj = kobject_create_and_add("limits",
+ &vdev->dev.kobj);
+ if (vf->limits_kobj == NULL) {
+ err = -ENOMEM;
+ goto error;
+ }
+ if (quota_sysfs_create("sso", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.sso->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create sso limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("ssow", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.ssow->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create ssow limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("npa", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.npa->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create npa limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("cpt", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.cpt->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create cpt limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("tim", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.tim->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create tim limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+
+ vf->in_use_attr.show = vf_in_use_show;
+ vf->in_use_attr.attr.name = "in_use";
+ vf->in_use_attr.attr.mode = 0444;
+ sysfs_attr_init(&vf->in_use_attr.attr);
+ if (sysfs_create_file(&vdev->dev.kobj, &vf->in_use_attr.attr)) {
+ dev_err(&pdev->dev,
+ "Failed to create in_use sysfs entry for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ i++;
+ }
+
+ return 0;
+error:
+ vf_sysfs_destroy(pdev);
+ return err;
+}
+
+static int rm_check_pf_usable(struct rm_dev *rm)
+{
+ u64 rev;
+
+ rev = rm_read64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /* Check if AF has setup revision for RVUM block,
+ * otherwise this driver probe should be deferred
+ * until AF driver comes up.
+ */
+ if (!rev) {
+ dev_warn(&rm->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int rm_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rm_dev *rm;
+ int err;
+
+ rm = devm_kzalloc(dev, sizeof(struct rm_dev), GFP_KERNEL);
+ if (rm == NULL)
+ return -ENOMEM;
+
+ rm->pdev = pdev;
+ pci_set_drvdata(pdev, rm);
+
+ mutex_init(&rm->lock);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto enable_failed;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto map_failed;
+ }
+
+ if (pci_sriov_get_totalvfs(pdev) <= 0) {
+ err = -ENODEV;
+ goto set_mask_failed;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto set_mask_failed;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto set_mask_failed;
+ }
+
+ pci_set_master(pdev);
+
+ /* CSR Space mapping */
+ rm->bar2 = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM,
+ pci_resource_len(pdev, PCI_CFG_REG_BAR_NUM));
+ if (!rm->bar2) {
+ dev_err(&pdev->dev, "Unable to map BAR2\n");
+ err = -ENODEV;
+ goto set_mask_failed;
+ }
+
+ err = rm_check_pf_usable(rm);
+ if (err)
+ goto pf_unusable;
+
+ /* Map PF-AF mailbox memory */
+ rm->af_mbx_base = ioremap_wc(pci_resource_start(pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(pdev, PCI_MBOX_BAR_NUM));
+ if (!rm->af_mbx_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENODEV;
+ goto pf_unusable;
+ }
+
+ /* Request IRQ for PF-VF mailbox here - TBD: check if this can be moved
+ * to sriov enable function
+ */
+ if (rm_alloc_irqs(pdev)) {
+ dev_err(&pdev->dev,
+ "Unable to allocate MSIX Interrupt vectors\n");
+ err = -ENODEV;
+ goto alloc_irqs_failed;
+ }
+
+ if (rm_register_mbox_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate MBOX Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_mbox_irq_failed;
+ }
+
+ if (rm_register_flr_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate FLR Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_flr_irq_failed;
+ }
+
+ enable_af_mbox_int(pdev);
+
+ if (rm_get_pcifunc(rm)) {
+ dev_err(&pdev->dev,
+ "Failed to retrieve pcifunc from AF\n");
+ err = -ENODEV;
+ goto get_pcifunc_failed;
+ }
+
+ /* Add to global list of PFs found */
+ spin_lock(&rm_lst_lock);
+ list_add(&rm->list, &rm_dev_lst_head);
+ spin_unlock(&rm_lst_lock);
+
+ return 0;
+
+get_pcifunc_failed:
+ disable_af_mbox_int(pdev);
+ rm_free_flr_irq(pdev);
+reg_flr_irq_failed:
+ rm_afpf_mbox_term(pdev);
+reg_mbox_irq_failed:
+ rm_free_irqs(pdev);
+alloc_irqs_failed:
+ iounmap(rm->af_mbx_base);
+pf_unusable:
+ pcim_iounmap(pdev, rm->bar2);
+set_mask_failed:
+ pci_release_regions(pdev);
+map_failed:
+ pci_disable_device(pdev);
+enable_failed:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, rm);
+ return err;
+}
+
+static void enable_vf_flr_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear any pending interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0), ~0x0ULL);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0), ~0x0ULL);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1), ~0x0ULL);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1), ~0x0ULL);
+ }
+
+ /* Enable for the first 64 VFs - up to the number of VFs enabled */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127 (MAX) */
+ /* Enable interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void disable_vf_flr_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+ u64 intr;
+
+ rm = pci_get_drvdata(pdev);
+ /* clear any pending interrupt */
+
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0), intr);
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0), intr);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1), intr);
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1), intr);
+ }
+
+ /* Disable for the first 64 VFs - up to the number of VFs enabled */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127 (MAX) */
+ /* Disable interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void enable_vf_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear any pending interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1),
+ ~0x0ULL);
+ }
+
+ /* Enable for the first 64 VFs - up to the number of VFs enabled */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127 (MAX) */
+ /* Enable interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void disable_vf_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+ u64 intr;
+
+ rm = pci_get_drvdata(pdev);
+ /* clear any pending interrupt */
+
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ }
+
+ /* Disable for the first 64 VFs - up to the number of VFs enabled */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127 (MAX) */
+ /* Disable interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static int __sriov_disable(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ if (pci_vfs_assigned(pdev)) {
+ dev_err(&pdev->dev, "Disabing VFs while VFs are assigned\n");
+ dev_err(&pdev->dev, "VFs will not be freed\n");
+ return -EPERM;
+ }
+
+ disable_vf_flr_int(pdev);
+ disable_vf_mbox_int(pdev);
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+ domain_sysfs_destroy(rm);
+#endif
+ vf_sysfs_destroy(pdev);
+
+ if (rm->pfvf_mbox_wq) {
+ flush_workqueue(rm->pfvf_mbox_wq);
+ destroy_workqueue(rm->pfvf_mbox_wq);
+ rm->pfvf_mbox_wq = NULL;
+ }
+ if (rm->pfvf_mbx_base) {
+ iounmap(rm->pfvf_mbx_base);
+ rm->pfvf_mbx_base = NULL;
+ }
+
+ otx2_mbox_destroy(&rm->pfvf_mbox);
+ otx2_mbox_destroy(&rm->pfvf_mbox_up);
+
+ pci_disable_sriov(pdev);
+
+ kfree(rm->vf_info);
+ rm->vf_info = NULL;
+
+ return 0;
+}
+
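+/* Enable SRIOV: query AF for free resource counts, enable the VFs, map and
+ * initialize both PF-VF mailbox directions, create per-VF sysfs entries
+ * and finally unmask the VF mailbox and FLR interrupts.
+ */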
+static int __sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ int curr_vfs, vf = 0;
+ int err;
+ struct rm_dev *rm;
+ struct rvu_vf *vf_ptr;
+ u64 pf_vf_mbox_base;
+
+ curr_vfs = pci_num_vf(pdev);
+ if (!curr_vfs && !num_vfs)
+ return -EINVAL;
+
+ if (curr_vfs) {
+ dev_err(&pdev->dev,
+ "Virtual Functions are already enabled on this device\n");
+ return -EINVAL;
+ }
+ if (num_vfs > RM_MAX_VFS)
+ num_vfs = RM_MAX_VFS;
+
+ rm = pci_get_drvdata(pdev);
+
+ if (rm_get_available_rsrcs(rm)) {
+ dev_err(&pdev->dev, "Failed to get resource limits.\n");
+ return -EFAULT;
+ }
+
+ rm->vf_info = kcalloc(num_vfs, sizeof(struct rvu_vf), GFP_KERNEL);
+ if (rm->vf_info == NULL)
+ return -ENOMEM;
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable to SRIOV VFs: %d\n", err);
+ goto err_enable_sriov;
+ }
+
+ rm->num_vfs = num_vfs;
+
+ /* Map PF-VF mailbox memory */
+ pf_vf_mbox_base = (u64)rm->bar2 + RVU_PF_VF_BAR4_ADDR;
+ pf_vf_mbox_base = readq((void __iomem *)(unsigned long)pf_vf_mbox_base);
+ if (!pf_vf_mbox_base) {
+ dev_err(&pdev->dev, "PF-VF Mailbox address not configured\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ rm->pfvf_mbx_base = ioremap_wc(pf_vf_mbox_base, MBOX_SIZE * num_vfs);
+ if (!rm->pfvf_mbx_base) {
+ dev_err(&pdev->dev,
+ "Mapping of PF-VF mailbox address failed\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ err = otx2_mbox_init(&rm->pfvf_mbox, rm->pfvf_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFVF, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX for %d VFs\n",
+ num_vfs);
+ goto err_mbox_init;
+ }
+
+ err = otx2_mbox_init(&rm->pfvf_mbox_up, rm->pfvf_mbx_base, pdev,
+ rm->bar2, MBOX_DIR_PFVF_UP, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX UP for %d VFs\n",
+ num_vfs);
+ goto err_mbox_up_init;
+ }
+
+ /* Allocate a single workqueue for VF/PF mailbox because access to
+ * AF/PF mailbox has to be synchronized.
+ */
+ rm->pfvf_mbox_wq =
+ alloc_workqueue("rm_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+ if (rm->pfvf_mbox_wq == NULL) {
+ dev_err(&pdev->dev,
+ "Workqueue allocation failed for PF-VF MBOX\n");
+ err = -ENOMEM;
+ goto err_workqueue_alloc;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ vf_ptr = &rm->vf_info[vf];
+ vf_ptr->vf_id = vf;
+ vf_ptr->rm = (void *)rm;
+ vf_ptr->intr_idx = vf % 64;
+ INIT_WORK(&vf_ptr->mbox_wrk, rm_pfvf_mbox_handler);
+ INIT_WORK(&vf_ptr->mbox_wrk_up, rm_pfvf_mbox_handler_up);
+ INIT_WORK(&vf_ptr->pfvf_flr_work, rm_pfvf_flr_handler);
+ }
+
+ err = vf_sysfs_create(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize VF sysfs entries. Err=%d\n",
+ err);
+ err = -EFAULT;
+ goto err_vf_sysfs_create;
+ }
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+ err = domain_sysfs_create(rm);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create RM domain sysfs\n");
+ err = -EFAULT;
+ goto err_domain_sysfs_create;
+ }
+#endif
+
+ enable_vf_mbox_int(pdev);
+ enable_vf_flr_int(pdev);
+ return num_vfs;
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+err_domain_sysfs_create:
+ vf_sysfs_destroy(pdev);
+#endif
+err_vf_sysfs_create:
+err_workqueue_alloc:
+ if (rm->pfvf_mbox_wq)
+ destroy_workqueue(rm->pfvf_mbox_wq);
+ if (rm->pfvf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&rm->pfvf_mbox_up);
+err_mbox_up_init:
+ if (rm->pfvf_mbox.dev != NULL)
+ otx2_mbox_destroy(&rm->pfvf_mbox);
+err_mbox_init:
+ iounmap(rm->pfvf_mbx_base);
+err_mbox_mem_map:
+ pci_disable_sriov(pdev);
+err_enable_sriov:
+ kfree(rm->vf_info);
+
+ return err;
+}
+
+static int rm_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return __sriov_disable(pdev);
+ else
+ return __sriov_enable(pdev, num_vfs);
+}
+
+static void rm_remove(struct pci_dev *pdev)
+{
+ struct rm_dev *rm = pci_get_drvdata(pdev);
+
+ spin_lock(&rm_lst_lock);
+ list_del(&rm->list);
+ spin_unlock(&rm_lst_lock);
+
+ if (rm->num_vfs)
+ __sriov_disable(pdev);
+
+ disable_af_mbox_int(pdev);
+ rm_free_flr_irq(pdev);
+ rm_afpf_mbox_term(pdev);
+ rm_free_irqs(pdev);
+
+ if (rm->af_mbx_base)
+ iounmap(rm->af_mbx_base);
+ if (rm->bar2)
+ pcim_iounmap(pdev, rm->bar2);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ devm_kfree(&pdev->dev, rm);
+}
+
+static struct pci_driver rm_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_rm_id_table,
+ .probe = rm_probe,
+ .remove = rm_remove,
+ .sriov_configure = rm_sriov_configure,
+};
+
+static int __init otx2_rm_init_module(void)
+{
+ pr_info("%s\n", DRV_NAME);
+
+ spin_lock_init(&rm_lst_lock);
+ return pci_register_driver(&rm_driver);
+}
+
+static void __exit otx2_rm_exit_module(void)
+{
+ pci_unregister_driver(&rm_driver);
+}
+
+module_init(otx2_rm_init_module);
+module_exit(otx2_rm_exit_module);
diff --git a/drivers/soc/marvell/octeontx2-rm/otx2_rm.h b/drivers/soc/marvell/octeontx2-rm/otx2_rm.h
new file mode 100644
index 000000000000..2375e84c1198
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/otx2_rm.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef RM_H_
+#define RM_H_
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include "mbox.h"
+#include "quota.h"
+
+#define MAX_DOM_VFS 8
+#define RM_MAX_VFS 128
+/* 12 CGX PFs + max HWVFs - VFs used for domains */
+#define RM_MAX_PORTS (12 + 256 - MAX_DOM_VFS)
+#define NAME_SIZE 32
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
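+/* pcifunc layout: PF number in bits [15:10], function in bits [9:0]
+ * (0 addresses the PF itself, VF N is encoded as N + 1).
+ */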
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_PF 0x0063 /* Errata */
+#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_AFVF 0x00F8
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_VF 0xA064
+
+struct rm_dev;
+
+struct rvu_vf {
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct work_struct pfvf_flr_work;
+ struct device_attribute in_use_attr;
+ struct pci_dev *pdev;
+ struct kobject *limits_kobj;
+ /* pointer to the PF device this VF belongs to */
+ struct rm_dev *rm;
+ int vf_id;
+ int intr_idx; /* vf_id % 64, bit index within the interrupt registers */
+ bool in_use;
+ bool got_flr;
+};
+
+struct rvu_limits {
+ struct quotas *sso;
+ struct quotas *ssow;
+ struct quotas *npa;
+ struct quotas *tim;
+ struct quotas *cpt;
+};
+
+struct rm_dev {
+ struct list_head list;
+ struct mutex lock;
+ struct pci_dev *pdev;
+ void __iomem *bar2;
+ void __iomem *af_mbx_base;
+ void __iomem *pfvf_mbx_base;
+#define RM_VF_ENABLED 0x1
+ u32 flags;
+ u32 num_vfs;
+ bool *irq_allocated;
+ char *irq_names;
+ int msix_count;
+ int pf;
+
+ struct otx2_mbox pfvf_mbox; /* MBOXes for VF => PF channel */
+ struct otx2_mbox pfvf_mbox_up; /* MBOXes for PF => VF channel */
+ struct otx2_mbox afpf_mbox; /* MBOX for PF => AF channel */
+ struct otx2_mbox afpf_mbox_up; /* MBOX for AF => PF channel */
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct workqueue_struct *afpf_mbox_wq; /* MBOX handler */
+ struct workqueue_struct *pfvf_mbox_wq; /* VF MBOX handler */
+ struct rvu_vf *vf_info;
+ struct free_rsrcs_rsp limits; /* Maximum limits for all VFs */
+ struct rvu_limits vf_limits; /* Limits for each VF */
+};
+
+#endif /* RM_H_ */
diff --git a/drivers/soc/marvell/octeontx2-rm/quota.c b/drivers/soc/marvell/octeontx2-rm/quota.c
new file mode 100644
index 000000000000..361b903cf86c
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/quota.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "quota.h"
+
+static ssize_t quota_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct quota *quota;
+ int val;
+
+ quota = container_of(attr, struct quota, sysfs);
+
+ if (quota->base->lock)
+ mutex_lock(quota->base->lock);
+ val = quota->val;
+ if (quota->base->lock)
+ mutex_unlock(quota->base->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
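+/* Writes are validated in stages: parse and bounds-check the new value,
+ * run the optional pre_store hook, then check that the sum of all quotas
+ * stays within max_sum before committing and calling post_store.
+ */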
+static ssize_t quota_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct quota *quota;
+ struct quotas *base;
+ struct device *dev;
+ int old_val, new_val, res = 0;
+ u64 lf_sum;
+
+ quota = container_of(attr, struct quota, sysfs);
+ dev = quota->dev;
+ base = quota->base;
+
+ if (kstrtoint(buf, 0, &new_val)) {
+ dev_err(dev, "Invalid %s quota: %s\n", attr->attr.name, buf);
+ return -EIO;
+ }
+ if (new_val < 0) {
+ dev_err(dev, "Invalid %s quota: %d < 0\n", attr->attr.name,
+ new_val);
+ return -EIO;
+ }
+
+ if (new_val > base->max) {
+ dev_err(dev, "Invalid %s quota: %d > %d\n", attr->attr.name,
+ new_val, base->max);
+ return -EIO;
+ }
+
+ if (base->lock)
+ mutex_lock(base->lock);
+ old_val = quota->val;
+
+ if (base->ops.pre_store)
+ res = base->ops.pre_store(quota->ops_arg, quota, new_val);
+
+ if (res != 0) {
+ res = -EIO;
+ goto unlock;
+ }
+
+ lf_sum = quotas_get_sum(quota->base);
+
+ if (lf_sum + new_val - quota->val > base->max_sum) {
+ dev_err(dev,
+ "Not enough resources for %s quota. Used: %lld, Max: %lld\n",
+ attr->attr.name, lf_sum, base->max_sum);
+ res = -EIO;
+ goto unlock;
+ }
+ quota->val = new_val;
+
+ if (base->ops.post_store)
+ base->ops.post_store(quota->ops_arg, quota, old_val);
+
+ res = count;
+
+unlock:
+ if (base->lock)
+ mutex_unlock(base->lock);
+ return res;
+}
+
+struct quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct quota_ops *ops)
+{
+ struct quotas *quotas;
+ u64 i;
+
+ if (cnt == 0)
+ return NULL;
+
+ quotas = kzalloc(sizeof(struct quotas) + cnt * sizeof(struct quota),
+ GFP_KERNEL);
+ if (quotas == NULL)
+ return NULL;
+
+ for (i = 0; i < cnt; i++) {
+ quotas->a[i].base = quotas;
+ quotas->a[i].val = init_val;
+ }
+
+ quotas->cnt = cnt;
+ quotas->max = max;
+ quotas->max_sum = max_sum;
+ if (ops) {
+ quotas->ops.pre_store = ops->pre_store;
+ quotas->ops.post_store = ops->post_store;
+ }
+ quotas->lock = lock;
+
+ return quotas;
+}
+
+void quotas_free(struct quotas *quotas)
+{
+ u64 i;
+
+ if (quotas == NULL)
+ return;
+ WARN_ON(quotas->cnt == 0);
+
+ for (i = 0; i < quotas->cnt; i++)
+ quota_sysfs_destroy(&quotas->a[i]);
+
+ kfree(quotas);
+}
+
+int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct quota *quota,
+ void *ops_arg)
+{
+ int err;
+
+ if (name == NULL || quota == NULL || log_dev == NULL)
+ return -EINVAL;
+
+ quota->sysfs.show = quota_show;
+ quota->sysfs.store = quota_store;
+ quota->sysfs.attr.name = name;
+ quota->sysfs.attr.mode = 0644;
+ quota->parent = parent;
+ quota->dev = log_dev;
+ quota->ops_arg = ops_arg;
+
+ sysfs_attr_init(&quota->sysfs.attr);
+ err = sysfs_create_file(quota->parent, &quota->sysfs.attr);
+ if (err) {
+ dev_err(quota->dev,
+ "Failed to create '%s' quota sysfs for '%s'\n",
+ name, kobject_name(quota->parent));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int quota_sysfs_destroy(struct quota *quota)
+{
+ if (quota == NULL)
+ return -EINVAL;
+ if (quota->sysfs.attr.mode != 0) {
+ sysfs_remove_file(quota->parent, &quota->sysfs.attr);
+ quota->sysfs.attr.mode = 0;
+ }
+ return 0;
+}
+
+u64 quotas_get_sum(struct quotas *quotas)
+{
+ u64 lf_sum = 0;
+ int i;
+
+ for (i = 0; i < quotas->cnt; i++)
+ lf_sum += quotas->a[i].val;
+
+ return lf_sum;
+}
diff --git a/drivers/soc/marvell/octeontx2-rm/quota.h b/drivers/soc/marvell/octeontx2-rm/quota.h
new file mode 100644
index 000000000000..84b12f952f4c
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/quota.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef QUOTA_H_
+#define QUOTA_H_
+
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+
+struct quotas;
+
+struct quota {
+ struct kobj_attribute sysfs;
+ /* Device to scope logs to */
+ struct device *dev;
+ /* Kobject of the sysfs file */
+ struct kobject *parent;
+ /* Pointer to base structure */
+ struct quotas *base;
+ /* Argument passed to the quota_ops when this quota is modified */
+ void *ops_arg;
+ /* Value of the quota */
+ int val;
+};
+
+struct quota_ops {
+ /**
+ * Called before sysfs store(). store() will proceed if returns 0.
+ * It is called with struct quotas::lock taken.
+ */
+ int (*pre_store)(void *arg, struct quota *quota, int new_val);
+ /** called after sysfs store(). */
+ void (*post_store)(void *arg, struct quota *quota, int old_val);
+};
+
+struct quotas {
+ struct quota_ops ops;
+ struct mutex *lock; /* lock taken for each sysfs operation */
+ u32 cnt; /* number of elements in arr */
+ u32 max; /* maximum value for a single quota */
+ u64 max_sum; /* maximum sum of all quotas */
+ struct quota a[0]; /* array of quota assignments */
+};
+
+/**
+ * Allocate and set up a quotas structure.
+ *
+ * @p cnt number of quotas to allocate
+ * @p max maximum value of a single quota
+ * @p max_sum maximum sum of all quotas
+ * @p init_val initial value set to all quotas
+ * @p lock mutex taken for each sysfs operation
+ * @p ops callbacks for sysfs manipulation notifications
+ */
+struct quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct quota_ops *ops);
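+/* Example (illustrative sketch; vf_cnt, dev_lock, ops, limits_kobj, dev
+ * and vf are hypothetical):
+ *
+ *   struct quotas *q = quotas_alloc(vf_cnt, 16, 64, 0, &dev_lock, &ops);
+ *
+ *   if (q != NULL)
+ *       quota_sysfs_create("sso", limits_kobj, dev, &q->a[0], vf);
+ */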
+/**
+ * Frees quota array and any sysfs entries associated with it.
+ */
+void quotas_free(struct quotas *quotas);
+
+/**
+ * Create a sysfs entry controlling the given quota entry.
+ *
+ * Reads of the file created under @parent return the current value of the
+ * quota; writes take the quotas lock and verify that the new value does
+ * not exceed the configured maximums.
+ *
+ * @return 0 if succeeded, negative error code otherwise.
+ */
+int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct quota *quota,
+ void *ops_arg);
+/**
+ * Remove sysfs entry for a given quota if it was created.
+ */
+int quota_sysfs_destroy(struct quota *quota);
+
+/**
+ * Return the current sum of all quota values in @quotas.
+ */
+u64 quotas_get_sum(struct quotas *quotas);
+
+#endif /* QUOTA_H_ */