Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/controller/Kconfig               16
-rw-r--r--  drivers/pci/controller/Makefile               2
-rw-r--r--  drivers/pci/controller/pci-octeon-pem.c     204
-rw-r--r--  drivers/pci/controller/pci-octeontx2-pem.c  481
-rw-r--r--  drivers/pci/endpoint/Kconfig                  7
-rw-r--r--  drivers/pci/endpoint/Makefile                 1
-rw-r--r--  drivers/pci/endpoint/pcie-armada-dw-ep.c    403
-rw-r--r--  drivers/pci/pcie/portdrv_core.c               9
-rw-r--r--  drivers/pci/probe.c                          15
-rw-r--r--  drivers/pci/quirks.c                         41
10 files changed, 1178 insertions(+), 1 deletion(-)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 64e2f5e379aa..bdaffe652712 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -197,6 +197,14 @@ config PCI_HOST_THUNDER_PEM
help
Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
+config PCI_HOST_OCTEONTX2_PEM
+ bool "Marvell OcteonTX2 PCIe controller to off-chip devices"
+ depends on ARM64
+ depends on OF
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want PCIe support for CN9XXX Marvell OcteonTX2 SoCs.
+
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
depends on ARM64 || COMPILE_TEST
@@ -205,6 +213,14 @@ config PCI_HOST_THUNDER_ECAM
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
+config PCI_OCTEON_PEM
+ bool "Marvell Octeon PEM (PCIe MAC) controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on PCI
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here if you want PEM controller support for Marvell ARM64 Octeon SoCs.
+
config PCIE_ROCKCHIP
bool
depends on PCI
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 04c6edc285c5..a9e1d9deb4e0 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -50,5 +50,7 @@ obj-y += mobiveil/
ifdef CONFIG_PCI
obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-octeontx2-pem.o
+obj-$(CONFIG_ARM64) += pci-octeon-pem.o
obj-$(CONFIG_ARM64) += pci-xgene.o
endif
diff --git a/drivers/pci/controller/pci-octeon-pem.c b/drivers/pci/controller/pci-octeon-pem.c
new file mode 100644
index 000000000000..b5b3e894c185
--- /dev/null
+++ b/drivers/pci/controller/pci-octeon-pem.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Octeon PEM driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#define DRV_NAME "octeon-pem"
+#define DRV_VERSION "1.0"
+
+#define PCI_DEVID_OCTEON_PEM 0xA06C
+
+#define ID_SHIFT 36
+#define DOMAIN_OFFSET 0x3
+#define RST_INT_OFFSET 0x300
+#define RST_INT_ENA_W1C_OFFSET 0x310
+#define RST_INT_ENA_W1S_OFFSET 0x318
+#define RST_INT_LINKDOWN BIT(1)
+
+struct pem_ctlr {
+ int index;
+ char irq_name[32];
+ void __iomem *base;
+ struct pci_dev *pdev;
+ struct work_struct recover_rc_work;
+};
+
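+/*
+ * Deferred link-down recovery: tear down the RC bridge and everything below
+ * it, then rescan the domain so the freshly reset bridge is re-enumerated.
+ * Scheduled from pem_irq_handler(), which masks RST_INT[LINKDOWN] until the
+ * recovery has completed and the interrupt has been acknowledged.
+ */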
+static void pem_recover_rc_link(struct work_struct *ws)
+{
+ struct pem_ctlr *pem = container_of(ws, struct pem_ctlr,
+ recover_rc_work);
+ struct pci_dev *pem_dev = pem->pdev;
+ struct pci_dev *root_port;
+ struct pci_bus *bus;
+ int rc_domain;
+
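+	/*
+	 * The RC bridge behind PEM<n> is registered in PCI domain
+	 * n + DOMAIN_OFFSET (presumably the lower domains belong to
+	 * on-chip buses).
+	 */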
+ rc_domain = pem->index + DOMAIN_OFFSET;
+
+ root_port = pci_get_domain_bus_and_slot(rc_domain, 0, 0);
+ if (!root_port) {
+ dev_err(&pem_dev->dev, "failed to get root port\n");
+ return;
+ }
+
+ pci_lock_rescan_remove();
+
+ /* Clean-up device and RC bridge */
+ pci_stop_and_remove_bus_device(root_port);
+
+ /*
+	 * With auto-mode in use, the hardware resets and re-initializes the
+	 * RC bridge config space on every link-down event.  Rescanning
+	 * after the removal sets the RC bridge up cleanly in the kernel,
+	 * ready for the next link-up event.
+ */
+ bus = NULL;
+ while ((bus = pci_find_next_bus(bus)) != NULL)
+ if (bus->domain_nr == rc_domain)
+ pci_rescan_bus(bus);
+ pci_unlock_rescan_remove();
+ pci_dev_put(root_port);
+
+ /* Ack interrupt */
+ writeq(RST_INT_LINKDOWN, pem->base + RST_INT_OFFSET);
+ /* Enable RST_INT[LINKDOWN] interrupt */
+ writeq(RST_INT_LINKDOWN, pem->base + RST_INT_ENA_W1S_OFFSET);
+}
+
+static irqreturn_t pem_irq_handler(int irq, void *dev_id)
+{
+ struct pem_ctlr *pem = (struct pem_ctlr *)dev_id;
+
+ /* Disable RST_INT[LINKDOWN] interrupt */
+ writeq(RST_INT_LINKDOWN, pem->base + RST_INT_ENA_W1C_OFFSET);
+ schedule_work(&pem->recover_rc_work);
+
+ return IRQ_HANDLED;
+}
+
+static int pem_register_interrupts(struct pci_dev *pdev)
+{
+ struct pem_ctlr *pem = pci_get_drvdata(pdev);
+ int nvec, err;
+
+ nvec = pci_msix_vec_count(pdev);
+	/*
+	 * Some earlier silicon revisions do not implement the RST MSI-X
+	 * vector (vector 9), so check the MSI-X table size before
+	 * registering; otherwise just return with an info message.
+	 */
+ if (nvec != 10) {
+ dev_info(&pdev->dev,
+ "No RST MSI-X vector support on silicon\n");
+ return 0;
+ }
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (err < 0) {
+		dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed %d\n",
+			err);
+		return err;
+ }
+
+ snprintf(pem->irq_name, 32, "PEM%d RST_INT", pem->index);
+
+ /* register interrupt for RST_INT */
+ return devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 9),
+ pem_irq_handler, 0,
+ pem->irq_name, pem);
+}
+
+static int pem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct pem_ctlr *pem;
+ int err;
+
+	pem = devm_kzalloc(dev, sizeof(*pem), GFP_KERNEL);
+	if (!pem)
+ return -ENOMEM;
+
+ pem->pdev = pdev;
+ pci_set_drvdata(pdev, pem);
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto enable_failed;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto region_failed;
+ }
+
+ pci_set_master(pdev);
+
+ /* CSR Space mapping */
+ pem->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!pem->base) {
+ dev_err(&pdev->dev, "Unable to map BAR0\n");
+ err = -ENODEV;
+ goto bar0_map_failed;
+ }
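+
+	/* The PEM index is encoded in bits [39:36] of the BAR0 address */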
+ pem->index = ((u64)pci_resource_start(pdev, 0) >> ID_SHIFT) & 0xf;
+
+ err = pem_register_interrupts(pdev);
+ if (err < 0) {
+ dev_err(dev, "Register interrupt failed\n");
+ goto irq_failed;
+ }
+
+ INIT_WORK(&pem->recover_rc_work, pem_recover_rc_link);
+
+ /* Enable RST_INT[LINKDOWN] interrupt */
+ writeq(RST_INT_LINKDOWN, pem->base + RST_INT_ENA_W1S_OFFSET);
+
+ dev_info(&pdev->dev, "PEM%d probed\n", pem->index);
+ return 0;
+
+irq_failed:
+bar0_map_failed:
+ pci_release_regions(pdev);
+region_failed:
+enable_failed:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void pem_remove(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+}
+
+/* Supported devices */
+static const struct pci_device_id pem_id_table[] = {
+ {PCI_VDEVICE(CAVIUM, PCI_DEVID_OCTEON_PEM)},
+ {0} /* end of table */
+};
+
+static struct pci_driver pem_driver = {
+ .name = DRV_NAME,
+ .id_table = pem_id_table,
+ .probe = pem_probe,
+ .remove = pem_remove,
+};
+
+module_pci_driver(pem_driver);
+
+MODULE_AUTHOR("Marvell Inc.");
+MODULE_DESCRIPTION("Marvell Octeon PEM Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pem_id_table);
diff --git a/drivers/pci/controller/pci-octeontx2-pem.c b/drivers/pci/controller/pci-octeontx2-pem.c
new file mode 100644
index 000000000000..71e727c6962e
--- /dev/null
+++ b/drivers/pci/controller/pci-octeontx2-pem.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 PCIe host controller
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_OCTEONTX2_PEM)
+
+/* Bridge config space reads and writes are done indirectly through
+ * these registers.
+ */
+#define PEM_CFG_WR 0x18
+#define PEM_CFG_RD 0x20
+
+#define PCIERC_RAS_EINJ_EN 0x348
+#define PCIERC_RAS_EINJ_CTL6CMPP0 0x364
+#define PCIERC_RAS_EINJ_CTL6CMPV0 0x374
+#define PCIERC_RAS_EINJ_CTL6CHGP1 0x388
+#define PCIERC_RAS_EINJ_CTL6CHGV1 0x398
+#define PCIERC_RAS_EINJ_CTL6PE 0x3A4
+#define PCIERC_RASDP_EP_CTL 0x420
+#define PCIERC_RASDP_DE_ME 0x440
+
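+/*
+ * Per-controller state hung off pci_config_window->priv: the mapped PEM
+ * register block used for the indirect bridge config accesses, plus the
+ * three synthesized Enhanced Allocation dwords (returned for reads of
+ * EA_ENTRY2-4) that describe the fixed MSI-X BAR (see octeontx2_pem_init()).
+ */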
+struct octeontx2_pem_pci {
+ u32 ea_entry[3];
+ void __iomem *pem_reg_base;
+};
+
+static int octeontx2_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct octeontx2_pem_pci *pem_pci;
+ u64 read_val;
+
+ if (devfn != 0 || where >= 2048) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ pem_pci = (struct octeontx2_pem_pci *)cfg->priv;
+
+ /*
+ * 32-bit accesses only. Write the address to the low order
+ * bits of PEM_CFG_RD, then trigger the read by reading back.
+ * The config data lands in the upper 32-bits of PEM_CFG_RD.
+ */
+ read_val = where & ~3ull;
+ writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+
+	/* The HW reset values at a few config space locations are
+	 * garbage; fix them up.
+ */
+ switch (where & ~3) {
+ case 0x00: /* DevID & VenID */
+ read_val = 0xA02D177D;
+ break;
+ case 0x04:
+ read_val = 0x00100006;
+ break;
+ case 0x08:
+ read_val = 0x06040100;
+ break;
+ case 0x0c:
+ read_val = 0x00010000;
+ break;
+ case 0x18:
+ read_val = 0x00010100;
+ break;
+ case 0x40:
+ read_val &= 0xffff00ff;
+ read_val |= 0x00005000; /* In RC mode, point to EA capability */
+ break;
+ case 0x5c: /* EA_ENTRY2 */
+ read_val = pem_pci->ea_entry[0];
+ break;
+ case 0x60: /* EA_ENTRY3 */
+ read_val = pem_pci->ea_entry[1];
+ break;
+ case 0x64: /* EA_ENTRY4 */
+ read_val = pem_pci->ea_entry[2];
+ break;
+ case 0x70: /* Express Cap */
+ /* HW reset value is '0', set PME interrupt vector to 1 */
+ if (!(read_val & (0x1f << 25)))
+ read_val |= (1u << 25);
+ break;
+ default:
+ break;
+ }
+ read_val >>= (8 * (where & 3));
+ switch (size) {
+ case 1:
+ read_val &= 0xff;
+ break;
+ case 2:
+ read_val &= 0xffff;
+ break;
+ default:
+ break;
+ }
+ *val = read_val;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int octeontx2_pem_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == cfg->busr.start)
+ return octeontx2_pem_bridge_read(bus, devfn, where, size, val);
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+/*
+ * Some of the w1c_bits below also include read-only or non-writable
+ * reserved bits, this makes the code simpler and is OK as the bits
+ * are not affected by writing zeros to them.
+ */
+static u32 octeontx2_pem_bridge_w1c_bits(u64 where_aligned)
+{
+ u32 w1c_bits = 0;
+
+ switch (where_aligned) {
+ case 0x04: /* Command/Status */
+ case 0x1c: /* Base and I/O Limit/Secondary Status */
+ w1c_bits = 0xff000000;
+ break;
+ case 0x44: /* Power Management Control and Status */
+ w1c_bits = 0xfffffe00;
+ break;
+ case 0x78: /* Device Control/Device Status */
+ case 0x80: /* Link Control/Link Status */
+ case 0x88: /* Slot Control/Slot Status */
+ case 0x90: /* Root Status */
+ case 0xa0: /* Link Control 2 Registers/Link Status 2 */
+ w1c_bits = 0xffff0000;
+ break;
+ case 0x104: /* Uncorrectable Error Status */
+ case 0x110: /* Correctable Error Status */
+ case 0x130: /* Error Status */
+ case 0x180: /* Lane error status */
+ w1c_bits = 0xffffffff;
+ break;
+ default:
+ break;
+ }
+ return w1c_bits;
+}
+
+/* Some bits must be written to one so they appear to be read-only. */
+static u32 octeontx2_pem_bridge_w1_bits(u64 where_aligned)
+{
+ u32 w1_bits;
+
+ switch (where_aligned) {
+ case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
+ /* Force 32-bit I/O addressing. */
+ w1_bits = 0x0101;
+ break;
+ case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
+ /* Force 64-bit addressing */
+ w1_bits = 0x00010001;
+ break;
+ default:
+ w1_bits = 0;
+ break;
+ }
+ return w1_bits;
+}
+
+static int octeontx2_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct octeontx2_pem_pci *pem_pci;
+ u64 where_aligned = where & ~3ull;
+ u64 write_val, read_val;
+ u32 mask = 0;
+
+ if (devfn != 0 || where >= 2048)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ pem_pci = (struct octeontx2_pem_pci *)cfg->priv;
+
+ /*
+ * 32-bit accesses only. If the write is for a size smaller
+ * than 32-bits, we must first read the 32-bit value and merge
+ * in the desired bits and then write the whole 32-bits back
+ * out.
+ */
+ switch (size) {
+ case 1:
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ case 2:
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xffff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xffff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * By expanding the write width to 32 bits, we may
+ * inadvertently hit some W1C bits that were not intended to
+ * be written. Calculate the mask that must be applied to the
+ * data to be written to avoid these cases.
+ */
+ if (mask) {
+ u32 w1c_bits = octeontx2_pem_bridge_w1c_bits(where);
+
+ if (w1c_bits) {
+ mask &= w1c_bits;
+ val &= ~mask;
+ }
+ }
+
+ /*
+ * Some bits must be read-only with value of one. Since the
+ * access method allows these to be cleared if a zero is
+ * written, force them to one before writing.
+ */
+ val |= octeontx2_pem_bridge_w1_bits(where_aligned);
+
+ /*
+ * Low order bits are the config address, the high order 32
+ * bits are the data to be written.
+ */
+ write_val = (((u64)val) << 32) | where_aligned;
+ writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static void octeontx2_be_workaround_init(struct pci_bus *bus)
+{
+ u32 val;
+
+ /* Ensure that PCIERC_RASDP_DE_ME.ERR_MODE is set to 0 */
+ octeontx2_pem_bridge_read(bus, 0x00,
+ PCIERC_RASDP_DE_ME, 4, &val);
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RASDP_DE_ME, 4, val & ~BIT(0));
+
+ /* Disable parity error correction */
+ octeontx2_pem_bridge_read(bus, 0x00,
+ PCIERC_RASDP_EP_CTL, 4, &val);
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RASDP_EP_CTL, 4, val | BIT(0));
+
+ /* Enable RAS to change header
+ * PCIERC_RAS_EINJ_EN.EINJ0_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ1_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ2_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ3_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ4_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ5_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ6_EN.set(1);
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_EN, 4, BIT(6));
+
+	/* Set the error injection count to 1, set the type to TLP, and
+	 * keep INV_CNTRL at 0.
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6PE, 4, 1);
+
+ /* Set up compare point to compare Fmt/Type field in TLP Header word 0
+	 * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ *
+ * PCIERC_RAS_EINJ_CTL6CMPP0.EINJ6_COM_PT_H0.set(32'hfe00_0000);
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CMPP0, 4, 0xFE000000);
+
+ /* Set up the value to compare against,
+	 * look for Fmt/Type to indicate CfgRd/CfgWr - both type 0 or 1.
+	 * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24]
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CMPV0, 4, 0x44000000);
+
+ /* Set up the bit position in TLP Header word 1 to replace
+ * (LBE is bits 7:4, FBE is bits 3:0).
+ *
+	 * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGP1, 4, 0xFF);
+}
+
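+/*
+ * Program the byte-enables for the upcoming ECAM config access into the TLP
+ * via the RAS error-injection block set up above.  For example, a 2-byte
+ * write at config offset 0x2 yields be = 0x3 << 2 = 0xc, i.e. byte lanes
+ * 2 and 3.
+ */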
+static void octeontx2_be_workaround(struct pci_bus *bus, int where,
+ int size, u32 val)
+{
+ struct pci_host_bridge *rc;
+ u32 reg, be = 0;
+
+ rc = pci_find_host_bridge(bus);
+
+ /* Setup RAS to inject one error */
+ octeontx2_be_workaround_init(rc->bus);
+
+ /* Get byte-enable to inject into TLP */
+ where &= 0x03;
+ switch (size) {
+ case 1:
+ be = 1 << where;
+ break;
+ case 2:
+ be = 3 << where;
+ break;
+ case 4:
+ be = 0xF;
+ }
+
+ /* Set up the value you'd like to use for FBE (Cfg ops must have LBE==0)
+	 * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ */
+ octeontx2_pem_bridge_write(rc->bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGV1, 4, be);
+
+ /* To be absolutely sure that the ECAM access does not get to
+ * the MAC prior to the PCIERC register updates that are setting
+ * up for that ECAM access, SW should read back one of the
+ * registers it wrote before launching the ECAM access.
+ */
+ octeontx2_pem_bridge_read(rc->bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGV1, 4, &reg);
+}
+
+static int octeontx2_pem_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == cfg->busr.start)
+ return octeontx2_pem_bridge_write(bus, devfn, where, size, val);
+
+ octeontx2_be_workaround(bus, where, size, val);
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static int octeontx2_pem_init(struct device *dev, struct pci_config_window *cfg,
+ struct resource *res_pem)
+{
+ struct octeontx2_pem_pci *pem_pci;
+ resource_size_t bar4_start;
+
+ pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
+ if (!pem_pci)
+ return -ENOMEM;
+
+ pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
+ if (!pem_pci->pem_reg_base)
+ return -ENOMEM;
+
+ /*
+ * The MSI-X BAR for the PEM and AER interrupts is located at
+ * a fixed offset from the PEM register base. Generate a
+ * fragment of the synthesized Enhanced Allocation capability
+ * structure here for the BAR.
+ */
+ bar4_start = res_pem->start + 0xf00000000;
+ pem_pci->ea_entry[0] = (u32)bar4_start | 2;
+ pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+
+ cfg->priv = pem_pci;
+ return 0;
+}
+
+static int octeontx2_pem_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev;
+ struct resource *res_pem;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ pdev = to_platform_device(dev);
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+		dev_err(dev, "missing \"reg[1]\" property\n");
+ return -EINVAL;
+ }
+
+ return octeontx2_pem_init(dev, cfg, res_pem);
+}
+
+static struct pci_ecam_ops pci_octeontx2_pem_ops = {
+ .bus_shift = 20,
+ .init = octeontx2_pem_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = octeontx2_pem_config_read,
+ .write = octeontx2_pem_config_write,
+ }
+};
+
+static const struct of_device_id octeontx2_pem_of_match[] = {
+ {
+ .compatible = "marvell,pci-host-octeontx2-pem",
+ .data = &pci_octeontx2_pem_ops,
+ },
+ { },
+};
+
+static int octeontx2_pem_probe(struct platform_device *pdev)
+{
+ return pci_host_common_probe(pdev);
+}
+
+static struct platform_driver octeontx2_pem_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = octeontx2_pem_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = octeontx2_pem_probe,
+};
+builtin_platform_driver(octeontx2_pem_driver);
+
+#endif
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index 17bbdc9bbde0..58bd2a3b18b1 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -28,6 +28,13 @@ config PCI_ENDPOINT_CONFIGFS
configure the endpoint function and used to bind the
function with a endpoint controller.
+config PCIE_ARMADA_DW_EP
+	bool "Armada DesignWare PCIe endpoint driver"
+ depends on OF && HAS_IOMEM
+ help
+	  Enable this option to support the configurable DesignWare-based
+	  PCIe endpoint controller on Marvell Armada SoCs.
+
source "drivers/pci/endpoint/functions/Kconfig"
endmenu
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile
index 95b2fe47e3b0..6d51b3e38dde 100644
--- a/drivers/pci/endpoint/Makefile
+++ b/drivers/pci/endpoint/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o
obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\
pci-epc-mem.o functions/
+obj-$(CONFIG_PCIE_ARMADA_DW_EP) += pcie-armada-dw-ep.o
diff --git a/drivers/pci/endpoint/pcie-armada-dw-ep.c b/drivers/pci/endpoint/pcie-armada-dw-ep.c
new file mode 100644
index 000000000000..391e70397d05
--- /dev/null
+++ b/drivers/pci/endpoint/pcie-armada-dw-ep.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe end point controller driver for Marvell Armada
+ *
+ * Armada PCIe Glue Layer Source Code
+ *
+ * Based on Armada-SP2 PCIe end-point driver
+ */
+#define MODULE_NAME "armada-pcie-ep"
+
+#include <linux/armada-pcie-ep.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <uapi/linux/pci_regs.h>
+#include <linux/memory.h>
+
+#define PCIE_GLOBAL_CTRL 0x0
+#define PCIE_GLOBAL_CTRL_CRS_EN (1 << 9)
+#define PCIE_GLOBAL_CTRL_TYPE_OFF 4
+#define PCIE_GLOBAL_CTRL_TYPE_MASK 0xF
+#define PCIE_GLOBAL_CTRL_TYPE_RC (0x4)
+
+#define PCIE_ATU_VIEWPORT 0x900
+#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
+#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
+#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
+#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
+#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_CR1_FUNC_OFF 20
+#define PCIE_ATU_CR1_FUNC_MASK 0x1F
+#define PCIE_ATU_TYPE_MEM (0x0 << 0)
+#define PCIE_ATU_TYPE_IO (0x2 << 0)
+#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
+#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
+#define PCIE_ATU_CR2 0x908
+#define PCIE_ATU_CR2_REGION_EN (0x1 << 31)
+#define PCIE_ATU_CR2_BAR_EN (0x1 << 30)
+#define PCIE_ATU_CR2_FUNC_EN (0x1 << 19)
+#define PCIE_ATU_CR2_BAR_OFF 8
+#define PCIE_ATU_LOWER_BASE 0x90C
+#define PCIE_ATU_UPPER_BASE 0x910
+#define PCIE_ATU_LIMIT 0x914
+#define PCIE_ATU_LOWER_TARGET 0x918
+#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
+#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
+#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
+#define PCIE_ATU_UPPER_TARGET 0x91C
+
+#define PCIE_CAP_MSI_OFFSET 0x50
+#define PCIE_MSI_MSG_CTL 0x2
+#define PCIE_MSI_MSG_ADDR_L 0x4
+#define PCIE_MSI_MSG_ADDR_H 0x8
+#define PCI_MSI_FLAGS_QSIZE_OFF 4
+#define PCIE_MSI_MSG_DATA(is_64) (is_64 ? 0xC : 0x8)
+
+#define PCIE_SRIOV_DEVID_OFFSET 0x192
+
+#define PCIE_RESBAR_EXT_CAP_HDR_REG 0x25c
+#define PCIE_RESBAR_EXT_CAP_REG(bar) (PCIE_RESBAR_EXT_CAP_HDR_REG + 4 + \
+ (((bar) / 2 + (bar) % 2) & 0x3) * 8)
+#define PCIE_RESBAR_EXT_CAP_REG_MASK 0x000fffff
+#define PCIE_RESBAR_EXT_CAP_REG_SHIFT 4
+
+#define PCIE_BAR_IS_RESIZABLE(bar) ((bar) == 5 || (bar) == 4 || \
+ (bar) == 2 || (bar) == 0)
+#define MAX_ATU_REGIONS 16
+#define MAX_ATU_SIZE (4ul * SZ_1G)
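+
+/*
+ * Each outbound ATU window covers at most MAX_ATU_SIZE (4 GiB), so
+ * armada_pcie_ep_remap_host() splits larger host ranges across
+ * consecutive ATU regions.
+ */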
+
+#define BAR_ENABLE_OFFSET 0
+#define BAR_ENABLE_MASK (1 << BAR_ENABLE_OFFSET)
+
+struct armada_pcie_ep {
+ void __iomem *regs;
+ void __iomem *shadow_regs;
+ void __iomem *lm_regs;
+	void __iomem *pl_regs; /* port logic registers (PF0 only) */
+ struct device *dev;
+ struct clk *clk;
+};
+
+#define cfg_space_addr(func_id) (0x1000 * (func_id))
+
+#define cfg_func_base(ep, func_id, off) \
+ ((ep)->regs + cfg_space_addr(func_id) + (off))
+
+#define cfg_shadow_func_base(ep, func_id, off) \
+ ((ep)->shadow_regs + cfg_space_addr(func_id) + (off))
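+
+/*
+ * The "shadow" register space holds the BAR mask registers (the DBI2-style
+ * access of DesignWare controllers, assuming standard DWC behaviour):
+ * writing 0 to a BAR's shadow register disables that BAR, while setting
+ * bit 0 enables it.
+ */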
+
+#define get_out_region_idx(func_id, id) (func_id + id)
+#define get_in_region_idx(func_id, bar) (func_id + bar)
+
+struct armada_pcie_ep *armada_ep;
+
+void armada_pcie_ep_setup_bar(void *ep_hdl, int func_id, u32 bar_num, u32 props,
+ u64 sz)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ u32 space_type = props & PCI_BASE_ADDRESS_SPACE;
+ u32 sz_type = (props & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
+ u32 v = 0;
+ void __iomem *resbar = ep->pl_regs + PCIE_RESBAR_EXT_CAP_REG(bar_num);
+ void __iomem *bar = cfg_func_base(ep, func_id,
+ PCI_BASE_ADDRESS_0 + (bar_num * 4));
+ void __iomem *bar_mask = cfg_shadow_func_base(ep, func_id,
+ PCI_BASE_ADDRESS_0 + (bar_num * 4));
+
+ dev_dbg(ep->dev, "func%d: BAR%d size=0x%llx set requested\n",
+ func_id, bar_num, sz);
+ if (space_type == PCI_BASE_ADDRESS_SPACE_IO) {
+ v = props & (~PCI_BASE_ADDRESS_IO_MASK);
+ writel_relaxed(v, bar);
+ } else {
+		/* clear the upper 32 bits of a 64-bit BAR */
+ if (sz_type == PCI_BASE_ADDRESS_MEM_TYPE_64)
+ writel_relaxed(0, bar + 4);
+
+ v = props & (~PCI_BASE_ADDRESS_MEM_MASK);
+ writel_relaxed(v, bar);
+ }
+
+ /*
+	 * Set the BAR size using the Resizable BAR capability registers.
+	 * The minimum (and default) BAR size is 1MB.  Once the Resizable
+	 * BAR capability register is written, the Resizable BAR control
+	 * register at the next offset is updated automatically.
+ */
+ if (sz > SZ_1M && PCIE_BAR_IS_RESIZABLE(bar_num)) {
+ /* BAR size should be power of 2 already */
+ v = ((sz >> 20) & PCIE_RESBAR_EXT_CAP_REG_MASK);
+ v <<= PCIE_RESBAR_EXT_CAP_REG_SHIFT;
+ writel_relaxed(v, resbar);
+ }
+
+ /* Enable bar */
+ writel_relaxed(BAR_ENABLE_MASK, bar_mask);
+}
+EXPORT_SYMBOL(armada_pcie_ep_setup_bar);
+
+void armada_pcie_ep_disable_bars(void *ep_hdl, int func_id, u16 mask)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ void __iomem *bar_mask = cfg_shadow_func_base(ep, func_id,
+ PCI_BASE_ADDRESS_0);
+ int bar;
+
+ dev_dbg(ep->dev, "func%d: disable BARs 0x%x\n", func_id, mask);
+ mask &= PCIE_EP_ALL_BARS;
+ for (bar = 0; mask; mask >>= 1, bar++) {
+ if (mask & 1)
+ writel_relaxed(0, bar_mask + bar * 4);
+ }
+}
+EXPORT_SYMBOL(armada_pcie_ep_disable_bars);
+
+int armada_pcie_ep_get_msi(void *ep_hdl, int func_id, int vec_id,
+ struct msi_msg *msg)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ void __iomem *p = cfg_func_base(ep, func_id, PCIE_CAP_MSI_OFFSET);
+ u16 flags, vec_cnt;
+
+	/* Check that MSI is enabled and that there are enough vectors;
+	 * the QSIZE field holds log2 of the number of enabled MSI vectors.
+ */
+ flags = readw(p + PCI_MSI_FLAGS);
+ vec_cnt =
+ 1 << ((flags & PCI_MSI_FLAGS_QSIZE) >> PCI_MSI_FLAGS_QSIZE_OFF);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE) || (vec_id > vec_cnt))
+ return -EINVAL;
+
+ dev_dbg(ep->dev, "func%d: get msi vector id/counter 0x%x/%d\n",
+ func_id, vec_id, vec_cnt);
+ msg->address_lo = readl(p + PCI_MSI_ADDRESS_LO);
+ if (flags & PCI_MSI_FLAGS_64BIT) {
+ msg->address_hi = readl(p + PCI_MSI_ADDRESS_HI);
+ msg->data = readl(p + PCI_MSI_DATA_64) + vec_id;
+ } else {
+ msg->address_hi = 0;
+ msg->data = readl(p + PCI_MSI_DATA_32) + vec_id;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(armada_pcie_ep_get_msi);
+
+void armada_pcie_ep_cfg_enable(void *ep_hdl, int func_id)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ u32 v;
+
+ dev_dbg(ep->dev, "func%d: config enable\n", func_id);
+ v = readl_relaxed(ep->lm_regs + PCIE_GLOBAL_CTRL);
+ v &= ~PCIE_GLOBAL_CTRL_CRS_EN;
+ writel_relaxed(v, ep->lm_regs + PCIE_GLOBAL_CTRL);
+}
+EXPORT_SYMBOL(armada_pcie_ep_cfg_enable);
+
+/*
+ * Remap the host memory space into the local memory space.
+ * By default the memory spaces conflict, so the host memory space must be
+ * placed at an offset within our local memory space.
+ */
+int armada_pcie_ep_remap_host(void *ep_hdl, u32 func_id, u64 local_base,
+ u64 host_base, u64 size)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ void __iomem *pl_regs = ep->pl_regs;
+ u32 v, region = 0;
+ u64 remain_size = size;
+
+ /* ATU window size must be power of 2 */
+ if (!is_power_of_2(size))
+ return -EINVAL;
+
+ dev_dbg(ep->dev, "func%d: remap local:host(size) %llx:%llx(%llx)\n",
+ func_id, local_base, host_base, size);
+
+ while (remain_size > 0) {
+		if (region >= MAX_ATU_REGIONS) {
+			dev_err(ep->dev,
+				"Insufficient ATU regions to map host memory\n");
+			return -EINVAL;
+ }
+
+ v = PCIE_ATU_REGION_OUTBOUND;
+ v |= get_out_region_idx(func_id, region);
+ writel_relaxed(v, pl_regs + PCIE_ATU_VIEWPORT);
+
+ writel_relaxed(local_base & U32_MAX,
+ pl_regs + PCIE_ATU_LOWER_BASE);
+ writel_relaxed(local_base >> 32, pl_regs + PCIE_ATU_UPPER_BASE);
+ writel_relaxed(host_base & U32_MAX,
+ pl_regs + PCIE_ATU_LOWER_TARGET);
+ writel_relaxed(host_base >> 32,
+ pl_regs + PCIE_ATU_UPPER_TARGET);
+
+ if (remain_size > MAX_ATU_SIZE)
+ v = MAX_ATU_SIZE - 1;
+ else
+ v = remain_size - 1;
+ writel_relaxed(v, pl_regs + PCIE_ATU_LIMIT);
+
+ v = (func_id & PCIE_ATU_CR1_FUNC_MASK) << PCIE_ATU_CR1_FUNC_OFF;
+ writel_relaxed(v, pl_regs + PCIE_ATU_CR1);
+
+ v = PCIE_ATU_CR2_REGION_EN;
+ writel_relaxed(v, pl_regs + PCIE_ATU_CR2);
+
+ region++;
+ local_base += MAX_ATU_SIZE;
+ host_base += MAX_ATU_SIZE;
+ remain_size -= MAX_ATU_SIZE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(armada_pcie_ep_remap_host);
+
+/* Set up the internal target for the BAR.  When the PCIe host accesses the
+ * BAR, it reaches the local space defined by "addr" and "size".
+ */
+void armada_pcie_ep_bar_map(void *ep_hdl, u32 func_id, int bar,
+ phys_addr_t addr, u64 size)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ void __iomem *pl_regs = ep->pl_regs;
+ u32 region_indx = get_in_region_idx(func_id, bar);
+ u32 v;
+
+ v = PCIE_ATU_REGION_INBOUND | region_indx;
+ writel_relaxed(v, pl_regs + PCIE_ATU_VIEWPORT);
+
+ addr = addr & ~(size - 1);
+ v = lower_32_bits(addr);
+ writel_relaxed(v, pl_regs + PCIE_ATU_LOWER_TARGET);
+
+ v = upper_32_bits(addr);
+ writel_relaxed(v, pl_regs + PCIE_ATU_UPPER_TARGET);
+
+ v = (func_id & PCIE_ATU_CR1_FUNC_MASK) << PCIE_ATU_CR1_FUNC_OFF;
+ writel_relaxed(v, pl_regs + PCIE_ATU_CR1);
+
+ v = PCIE_ATU_CR2_REGION_EN |
+ PCIE_ATU_CR2_BAR_EN |
+ (bar << PCIE_ATU_CR2_BAR_OFF);
+ writel_relaxed(v, pl_regs + PCIE_ATU_CR2);
+ dev_dbg(ep->dev, "func%d: BAR%d map size@addr %llx@%llx\n",
+		func_id, bar, size, addr);
+}
+EXPORT_SYMBOL(armada_pcie_ep_bar_map);
+
+void *armada_pcie_ep_get(void)
+{
+ return (void *)armada_ep;
+}
+EXPORT_SYMBOL(armada_pcie_ep_get);
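+
+/*
+ * Sketch of how a client endpoint-function driver might use this API (the
+ * caller, BAR number and sizes below are illustrative only, not part of
+ * this driver):
+ *
+ *	void *ep = armada_pcie_ep_get();
+ *
+ *	if (ep) {
+ *		armada_pcie_ep_setup_bar(ep, 0, 2,
+ *					 PCI_BASE_ADDRESS_MEM_TYPE_64 |
+ *					 PCI_BASE_ADDRESS_MEM_PREFETCH,
+ *					 SZ_16M);
+ *		armada_pcie_ep_bar_map(ep, 0, 2, ring_phys_addr, SZ_16M);
+ *		armada_pcie_ep_cfg_enable(ep, 0);
+ *	}
+ */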
+
+static int armada_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct armada_pcie_ep *ep;
+ struct device *dev = &pdev->dev;
+ struct resource *base;
+ void __iomem *p;
+ int ret = 0;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->clk = devm_clk_get(dev, NULL);
+ if (PTR_ERR(ep->clk) == -EPROBE_DEFER) {
+ dev_info(dev, "PCIE EP probe deferred\n");
+ return -EPROBE_DEFER;
+ }
+ if (IS_ERR(ep->clk)) {
+ dev_err(dev, "can't find clock node\n");
+ return -ENODEV;
+ }
+
+ ret = clk_prepare_enable(ep->clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clock\n");
+ return ret;
+ }
+
+ ep->dev = dev;
+ platform_set_drvdata(pdev, ep);
+
+ /* Get registers bases and remap */
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lm");
+ p = devm_ioremap_resource(dev, base);
+ if (IS_ERR(p)) {
+ dev_err(dev, "couldn't remap lm regs base %pR\n", base);
+ return PTR_ERR(p);
+ }
+ ep->lm_regs = p;
+ dev_dbg(dev, "reg-%s va:pa(sz) %llx:%llx(%llx)\n",
+ "lm ", (phys_addr_t)p, base->start,
+ base->end - base->start);
+
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ p = devm_ioremap_resource(dev, base);
+ if (IS_ERR(p)) {
+ dev_err(dev, "couldn't remap core regs base %pR\n", base);
+ return PTR_ERR(p);
+ }
+ ep->regs = p;
+ ep->pl_regs = p;
+ dev_dbg(dev, "reg-%s va:pa(sz) %llx:%llx(%llx)\n",
+ "core ", (phys_addr_t)p, base->start,
+ base->end - base->start);
+
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "shadow_core");
+ p = devm_ioremap_resource(dev, base);
+ if (IS_ERR(p)) {
+ dev_err(dev, "%s: couldn't remap shadow regs base %pR\n",
+ MODULE_NAME, base);
+ return PTR_ERR(p);
+ }
+ ep->shadow_regs = p;
+ dev_dbg(dev, "reg-%s va:pa(sz) %llx:%llx(%llx)\n",
+ "shadow", (phys_addr_t)p, base->start,
+ base->end - base->start);
+
+ armada_ep = ep;
+
+ return 0;
+}
+
+static const struct of_device_id armada_pcie_ep_of_match[] = {
+ { .compatible = "marvell,armada-pcie-ep", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, armada_pcie_ep_of_match);
+
+static struct platform_driver armada_pcie_ep_driver = {
+ .probe = armada_pcie_ep_probe,
+ .driver = {
+ .name = "armada-pcie-ep",
+ .of_match_table = of_match_ptr(armada_pcie_ep_of_match),
+ },
+};
+
+module_platform_driver(armada_pcie_ep_driver);
+
+MODULE_DESCRIPTION("Armada PCIe EP controller driver");
+MODULE_AUTHOR("Gang Chen <gangc@marvell.com>");
+MODULE_AUTHOR("Yehuda Yitshcak <yehuday@marvell.com>");
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 3779b264dbec..a785019eb46b 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -221,7 +221,16 @@ static int get_port_device_capability(struct pci_dev *dev)
}
#ifdef CONFIG_PCIEAER
+	/*
+	 * Some AER interrupt capability registers may not be present on
+	 * non-Root Ports, and there is no way to check for the presence of
+	 * the Root Error Command and Root Error Status registers.  Allow
+	 * the AER service only on Root Ports (see PCIe r5.0, sec 7.8.4).
+	 * Otherwise the AER interrupt message number is read incorrectly,
+	 * causing MSI-X vector registration to fail and fall back to legacy
+	 * interrupts.
+	 */
if (dev->aer_cap && pci_aer_available() &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
(pcie_ports_native || host->native_aer)) {
services |= PCIE_PORT_SERVICE_AER;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ece90a23936d..62797b6d803e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1362,6 +1362,21 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
/* We need to blast all three values with a single write */
pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
+	/*
+	 * When a Marvell PCI bridge links up at Gen1 speed, a write to the
+	 * primary bus register followed immediately by a scan of the
+	 * secondary bus does not reach endpoint devices: before the write
+	 * takes effect in hardware, a read of the endpoint's vendor/device
+	 * ID may return 0xffff because the bus numbers are still 0 from the
+	 * earlier write to the primary bus register.  Work around this by
+	 * reading the primary bus register back after the write, which
+	 * forces the write to complete, for this bridge only.
+	 */
+ if (dev->vendor == PCI_VENDOR_ID_CAVIUM && dev->device == 0xa02d)
+ pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
+
if (!is_cardbus) {
child->bridge_ctl = bctl;
max = pci_scan_child_bus_extend(child, available_buses);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 1be2894ada70..d274e07b3386 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4497,7 +4497,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
{
- if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ if (!pci_is_pcie(dev))
return false;
switch (dev->device) {
@@ -5777,3 +5777,42 @@ static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
+
+/*
+ * Marvell cnf10ka (0xba00) requires a fix for the device at slot 0xe,
+ * function 0x0: wrong values for BAR0 and BAR4 are fetched from config
+ * space.  Some devices do not require the fix, so it is not applied
+ * unconditionally; the deciding factor is the current value of BAR0/BAR4.
+ * The config space of cnf10ka is read-only, so changing the registers
+ * themselves is not possible.
+ */
+#define CAVIUM_XCP0_ADDR_OK 0x000082c000000000ULL /* Correct PCI BAR base */
+#define CAVIUM_XCP0_FIX_MASK 0xffffffff00000000ULL
+#define CAVIUM_XCP0_SHOULD_FIX(addr) \
+ (((addr) & CAVIUM_XCP0_FIX_MASK) != CAVIUM_XCP0_ADDR_OK)
+#define CAVIUM_XCP0_FIX_ADDR(addr) \
+ (((addr) & (~CAVIUM_XCP0_FIX_MASK)) | CAVIUM_XCP0_ADDR_OK)
+
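+/*
+ * Example: a BAR that reads back as 0x0000000010000000 keeps its low 32 bits
+ * and has the expected upper half applied, becoming 0x000082c010000000.
+ */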
+static void quirk_cavium_xcp0_bar_fixup(struct pci_dev *dev)
+{
+ int i;
+
+ if (dev->subsystem_device == 0xba00 && dev->devfn == 0xe0) {
+ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+
+ struct resource *r = &dev->resource[i];
+ int ret;
+
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+
+			/* Some HW revisions do not need the fixup */
+ if (CAVIUM_XCP0_SHOULD_FIX(r->start)) {
+ r->start = CAVIUM_XCP0_FIX_ADDR(r->start);
+ r->end = CAVIUM_XCP0_FIX_ADDR(r->end);
+ ret = pci_claim_resource(dev, i);
+ pci_info(dev, "Fixup (%d) %llx - %llx/%lx. (%d)\n",
+ i, r->start, r->end, r->flags, ret);
+ }
+ }
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa067, quirk_cavium_xcp0_bar_fixup);