Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/access.c                 |   8
-rw-r--r--  drivers/pci/bus.c                    |   6
-rw-r--r--  drivers/pci/dwc/pcie-qcom.c          |  12
-rw-r--r--  drivers/pci/endpoint/pci-epc-mem.c   |  10
-rw-r--r--  drivers/pci/host/pci-aardvark.c      | 622
-rw-r--r--  drivers/pci/host/pci-thunder-ecam.c  |   2
-rw-r--r--  drivers/pci/host/pci-thunder-pem.c   |  13
-rw-r--r--  drivers/pci/host/pci-xgene-msi.c     |  10
-rw-r--r--  drivers/pci/host/pcie-iproc-msi.c    |  13
-rw-r--r--  drivers/pci/host/pcie-iproc.c        |  10
-rw-r--r--  drivers/pci/host/pcie-mediatek.c     |   7
-rw-r--r--  drivers/pci/host/pcie-rcar.c         |   9
-rw-r--r--  drivers/pci/host/pcie-xilinx-nwl.c   |  12
-rw-r--r--  drivers/pci/host/vmd.c               |   8
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c   |  15
-rw-r--r--  drivers/pci/hotplug/rpadlpar_sysfs.c |  14
-rw-r--r--  drivers/pci/msi.c                    | 154
-rw-r--r--  drivers/pci/pci-acpi.c               |   4
-rw-r--r--  drivers/pci/pci-label.c              |   2
-rw-r--r--  drivers/pci/pci.c                    |  31
-rw-r--r--  drivers/pci/pci.h                    |   6
-rw-r--r--  drivers/pci/pcie/aspm.c              |  33
-rw-r--r--  drivers/pci/pcie/ptm.c               |  22
-rw-r--r--  drivers/pci/probe.c                  |  30
-rw-r--r--  drivers/pci/quirks.c                 | 332
-rw-r--r--  drivers/pci/slot.c                   |  12
-rw-r--r--  drivers/pci/switch/switchtec.c       |   2
-rw-r--r--  drivers/pci/syscall.c                |  14
28 files changed, 1081 insertions(+), 332 deletions(-)
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 913d6722ece9..8c585e7ca520 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -205,17 +205,13 @@ EXPORT_SYMBOL(pci_bus_set_ops);
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
static noinline void pci_wait_cfg(struct pci_dev *dev)
+ __must_hold(&pci_lock)
{
- DECLARE_WAITQUEUE(wait, current);
-
- __add_wait_queue(&pci_cfg_wait, &wait);
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irq(&pci_lock);
- schedule();
+ wait_event(pci_cfg_wait, !dev->block_cfg_access);
raw_spin_lock_irq(&pci_lock);
} while (dev->block_cfg_access);
- __remove_wait_queue(&pci_cfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
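To illustrate the pattern this hunk adopts: the open-coded wait queue is replaced by wait_event(), but pci_lock must still be dropped around the sleep and the condition re-checked once the lock is retaken. A minimal userspace sketch of the same discipline, with pthreads standing in for the kernel primitives (illustrative only, not the kernel code):
#include <pthread.h>
#include <stdbool.h>
/* Stand-ins: 'lock' plays pci_lock, 'cond' plays pci_cfg_wait and
 * 'blocked' plays dev->block_cfg_access. */
struct cfg_gate {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool blocked;
};
/* Caller must hold gate->lock, mirroring __must_hold(&pci_lock);
 * pthread_cond_wait() drops and retakes the lock around the sleep,
 * just as pci_wait_cfg() does explicitly with raw_spin_unlock_irq(). */
static void wait_cfg(struct cfg_gate *gate)
{
	while (gate->blocked)
		pthread_cond_wait(&gate->cond, &gate->lock);
}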
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index bc56cf19afd3..8f78e8c15d2e 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -324,12 +324,8 @@ void pci_bus_add_device(struct pci_dev *dev)
dev->match_driver = true;
retval = device_attach(&dev->dev);
- if (retval < 0 && retval != -EPROBE_DEFER) {
+ if (retval < 0 && retval != -EPROBE_DEFER)
dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
- pci_proc_detach_device(dev);
- pci_remove_sysfs_dev_files(dev);
- return;
- }
dev->is_added = 1;
}
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index ce7ba5b7552a..b84603f52dc1 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -96,6 +96,7 @@ struct qcom_pcie_resources_2_1_0 {
struct reset_control *ahb_reset;
struct reset_control *por_reset;
struct reset_control *phy_reset;
+ struct reset_control *ext_reset;
struct regulator *vdda;
struct regulator *vdda_phy;
struct regulator *vdda_refclk;
@@ -265,6 +266,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (IS_ERR(res->por_reset))
return PTR_ERR(res->por_reset);
+ res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
+ if (IS_ERR(res->ext_reset))
+ return PTR_ERR(res->ext_reset);
+
res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
return PTR_ERR_OR_ZERO(res->phy_reset);
}
@@ -277,6 +282,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
reset_control_assert(res->axi_reset);
reset_control_assert(res->ahb_reset);
reset_control_assert(res->por_reset);
+ reset_control_assert(res->ext_reset);
reset_control_assert(res->pci_reset);
clk_disable_unprepare(res->iface_clk);
clk_disable_unprepare(res->core_clk);
@@ -342,6 +348,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
goto err_deassert_ahb;
}
+ ret = reset_control_deassert(res->ext_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ext reset\n");
+ goto err_deassert_ahb;
+ }
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
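For context on the new "ext" reset line: the _optional_ getter returns NULL (not an error) when the device tree provides no "ext" reset, and the reset_control_assert()/deassert() calls treat NULL as a no-op, which is why the hunks above need no if (res->ext_reset) guards. A hedged sketch of that convention (hypothetical helper, not the kernel implementation):
struct reset_control;	/* opaque handle, as in the kernel */
/* Assumed no-op-on-NULL semantics of the optional-resource pattern. */
static int deassert_sketch(struct reset_control *rstc)
{
	if (!rstc)
		return 0;	/* optional reset not wired up: nothing to do */
	/* ... a real implementation would poke the reset controller ... */
	return 0;
}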
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index 83b7d5d3fc3e..60fbfe92e0ef 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -90,6 +90,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
mem->page_size = page_size;
mem->pages = pages;
mem->size = size;
+ mutex_init(&mem->lock);
epc->mem = mem;
@@ -133,7 +134,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
int pageno;
- void __iomem *virt_addr;
+ void __iomem *virt_addr = NULL;
struct pci_epc_mem *mem = epc->mem;
unsigned int page_shift = ilog2(mem->page_size);
int order;
@@ -141,15 +142,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
if (pageno < 0)
- return NULL;
+ goto ret;
*phys_addr = mem->phys_base + (pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
+ret:
+ mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
@@ -175,7 +179,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
pageno = (phys_addr - mem->phys_base) >> page_shift;
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
+ mutex_unlock(&mem->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
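Both the allocation and free paths now serialize their bitmap operations on mem->lock, so two concurrent pci_epc_mem_alloc_addr() calls can no longer claim the same region. The same find-and-claim-under-a-lock pattern as a self-contained userspace sketch (pthreads instead of a kernel mutex; illustrative only):
#include <pthread.h>
#define NPAGES 64
static pthread_mutex_t bmp_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long bmp;	/* bit i set => page i in use */
/* Find a free page and claim it; the lock makes find+set atomic. */
static int alloc_page(void)
{
	int i, ret = -1;
	pthread_mutex_lock(&bmp_lock);
	for (i = 0; i < NPAGES; i++) {
		if (!(bmp & (1ULL << i))) {
			bmp |= 1ULL << i;
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&bmp_lock);
	return ret;
}
/* The release path must take the same lock, mirroring
 * pci_epc_mem_free_addr(). */
static void free_page_no(int i)
{
	pthread_mutex_lock(&bmp_lock);
	bmp &= ~(1ULL << i);
	pthread_mutex_unlock(&bmp_lock);
}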
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 5f3048e75bec..9ae544e113dc 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -12,6 +12,7 @@
*/
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -20,6 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
+#include <linux/of_gpio.h>
#include <linux/of_pci.h>
/* PCIe core registers */
@@ -27,16 +29,7 @@
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
-#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
-#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
-#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
-#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
-#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
-#define PCIE_CORE_LINK_TRAINING BIT(5)
-#define PCIE_CORE_LINK_WIDTH_SHIFT 20
+#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
@@ -55,7 +48,8 @@
#define PIO_COMPLETION_STATUS_UR 1
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
-#define PIO_NON_POSTED_REQ BIT(0)
+#define PIO_NON_POSTED_REQ BIT(10)
+#define PIO_ERR_STATUS BIT(11)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
@@ -99,45 +93,109 @@
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
-#define PCIE_ISR0_ALL_MASK GENMASK(26, 0)
+#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
#define PCIE_ISR1_FLUSH BIT(5)
#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
-#define PCIE_ISR1_ALL_MASK GENMASK(11, 4)
+#define PCIE_ISR1_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+#define PCIE_MSI_DATA_MASK GENMASK(15, 0)
/* PCIe window configuration */
#define OB_WIN_BASE_ADDR 0x4c00
#define OB_WIN_BLOCK_SIZE 0x20
+#define OB_WIN_COUNT 8
#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
OB_WIN_BLOCK_SIZE * (win) + \
(offset))
#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
+#define OB_WIN_ENABLE BIT(0)
#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
-
-/* PCIe window types */
-#define OB_PCIE_MEM 0x0
-#define OB_PCIE_IO 0x4
+#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
+#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
+#define OB_WIN_FUNC_NUM_SHIFT 24
+#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
+#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
+#define OB_WIN_BUS_NUM_BITS_SHIFT 20
+#define OB_WIN_MSG_CODE_ENABLE BIT(22)
+#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
+#define OB_WIN_MSG_CODE_SHIFT 14
+#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
+#define OB_WIN_ATTR_ENABLE BIT(11)
+#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
+#define OB_WIN_ATTR_TC_SHIFT 8
+#define OB_WIN_ATTR_RELAXED BIT(7)
+#define OB_WIN_ATTR_NOSNOOP BIT(6)
+#define OB_WIN_ATTR_POISON BIT(5)
+#define OB_WIN_ATTR_IDO BIT(4)
+#define OB_WIN_TYPE_MASK GENMASK(3, 0)
+#define OB_WIN_TYPE_SHIFT 0
+#define OB_WIN_TYPE_MEM 0x0
+#define OB_WIN_TYPE_IO 0x4
+#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
+#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
+#define OB_WIN_TYPE_MSG 0xc
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
#define LTSSM_SHIFT 24
#define LTSSM_MASK 0x3f
-#define LTSSM_L0 0x10
#define RC_BAR_CONFIG 0x300
+/* LTSSM values in CFG_REG */
+enum {
+ LTSSM_DETECT_QUIET = 0x0,
+ LTSSM_DETECT_ACTIVE = 0x1,
+ LTSSM_POLLING_ACTIVE = 0x2,
+ LTSSM_POLLING_COMPLIANCE = 0x3,
+ LTSSM_POLLING_CONFIGURATION = 0x4,
+ LTSSM_CONFIG_LINKWIDTH_START = 0x5,
+ LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
+ LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
+ LTSSM_CONFIG_LANENUM_WAIT = 0x8,
+ LTSSM_CONFIG_COMPLETE = 0x9,
+ LTSSM_CONFIG_IDLE = 0xa,
+ LTSSM_RECOVERY_RCVR_LOCK = 0xb,
+ LTSSM_RECOVERY_SPEED = 0xc,
+ LTSSM_RECOVERY_RCVR_CFG = 0xd,
+ LTSSM_RECOVERY_IDLE = 0xe,
+ LTSSM_L0 = 0x10,
+ LTSSM_RX_L0S_ENTRY = 0x11,
+ LTSSM_RX_L0S_IDLE = 0x12,
+ LTSSM_RX_L0S_FTS = 0x13,
+ LTSSM_TX_L0S_ENTRY = 0x14,
+ LTSSM_TX_L0S_IDLE = 0x15,
+ LTSSM_TX_L0S_FTS = 0x16,
+ LTSSM_L1_ENTRY = 0x17,
+ LTSSM_L1_IDLE = 0x18,
+ LTSSM_L2_IDLE = 0x19,
+ LTSSM_L2_TRANSMIT_WAKE = 0x1a,
+ LTSSM_DISABLED = 0x20,
+ LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
+ LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
+ LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
+ LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
+ LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
+ LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
+ LTSSM_HOT_RESET = 0x27,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
+};
+
/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
@@ -168,7 +226,7 @@
#define PCIE_IRQ_MSI_INT2_DET BIT(21)
#define PCIE_IRQ_RC_DBELL_DET BIT(22)
#define PCIE_IRQ_EP_STATUS BIT(23)
-#define PCIE_IRQ_ALL_MASK 0xfff0fb
+#define PCIE_IRQ_ALL_MASK GENMASK(31, 0)
#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT
/* Transaction types */
@@ -185,7 +243,8 @@
(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
-#define PIO_TIMEOUT_MS 1
+#define PIO_RETRY_CNT 750000 /* 1.5 s */
+#define PIO_RETRY_DELAY 2 /* 2 us */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
@@ -197,8 +256,16 @@ struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
struct list_head resources;
+ struct {
+ phys_addr_t match;
+ phys_addr_t remap;
+ phys_addr_t mask;
+ u32 actions;
+ } wins[OB_WIN_COUNT];
+ u8 wins_count;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
+ raw_spinlock_t irq_lock;
struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
struct irq_chip msi_bottom_irq_chip;
@@ -208,6 +275,8 @@ struct advk_pcie {
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
+ int link_gen;
+ struct gpio_desc *reset_gpio;
};
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
@@ -220,52 +289,155 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
return readl(pcie->base + reg);
}
-static int advk_pcie_link_up(struct advk_pcie *pcie)
+static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
- u32 val, ltssm_state;
+ u32 val;
+ u8 ltssm_state;
val = advk_readl(pcie, CFG_REG);
ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
- return ltssm_state >= LTSSM_L0;
+ return ltssm_state;
+}
+
+static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
+{
+ /* check if LTSSM is in normal operation - some L* state */
+ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+ return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
+}
+
+static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
+{
+ /*
+ * According to PCIe Base specification 3.0, Table 4-14 (Link
+ * Status Mapped to the LTSSM), Link Training maps to the LTSSM
+ * Configuration and Recovery states.
+ */
+ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+ return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
+ ltssm_state < LTSSM_L0) ||
+ (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
+ ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
- struct device *dev = &pcie->pdev->dev;
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (advk_pcie_link_up(pcie)) {
- dev_info(dev, "link up\n");
+ if (advk_pcie_link_up(pcie))
return 0;
- }
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}
+static void advk_pcie_issue_perst(struct advk_pcie *pcie)
+{
+ if (!pcie->reset_gpio)
+ return;
+
+ /* 10ms delay is needed for some cards */
+ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
+static void advk_pcie_train_link(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ u32 reg;
+ int ret;
+
+ /*
+ * Set up PCIe rev / gen compliance based on the device tree property
+ * 'max-link-speed', which also forces the maximal link speed.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~PCIE_GEN_SEL_MSK;
+ if (pcie->link_gen == 3)
+ reg |= SPEED_GEN_3;
+ else if (pcie->link_gen == 2)
+ reg |= SPEED_GEN_2;
+ else
+ reg |= SPEED_GEN_1;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+ * Set the maximal link speed value also into the PCIe Link Control 2
+ * register. The Armada 3700 Functional Specification says the default
+ * value is based on SPEED_GEN, but tests showed it is always 8.0 GT/s.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+ reg &= ~PCI_EXP_LNKCTL2_TLS;
+ if (pcie->link_gen == 3)
+ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+ else if (pcie->link_gen == 2)
+ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ else
+ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+
+ /* Enable link training after selecting PCIe generation */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+ * Reset PCIe card via PERST# signal. Some cards are not detected
+ * during link training when they are in some non-initial state.
+ */
+ advk_pcie_issue_perst(pcie);
+
+ /*
+ * The PERST# signal could have been asserted by the pinctrl subsystem
+ * before the probe() callback was called, or issued explicitly by the
+ * reset GPIO function advk_pcie_issue_perst(), putting the endpoint
+ * into fundamental reset. As required by the PCI Express spec (PCI
+ * Express Base Specification, REV. 4.0, February 19 2014, sec. 6.6.1
+ * Conventional Reset), a delay of at least 100 ms is needed after such
+ * a reset before a Configuration Request may be sent to the device.
+ * So wait until the PCIe link is up; advk_pcie_wait_for_link() waits
+ * for the link for at least 900 ms.
+ */
+ ret = advk_pcie_wait_for_link(pcie);
+ if (ret < 0)
+ dev_err(dev, "link never came up\n");
+ else
+ dev_info(dev, "link up\n");
+}
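/*
 * A sketch of the timing above (illustrative, not driver code): the
 * wait loop polls LINK_WAIT_MAX_RETRIES (10) times with ~90 ms sleeps,
 * i.e. a ~900 ms budget, comfortably past the 100 ms post-reset window
 * the spec requires. Userspace stand-in, usleep() for usleep_range():
 */
#include <errno.h>
#include <stdbool.h>
#include <unistd.h>
static int poll_link(bool (*link_up)(void *ctx), void *ctx)
{
	int retry;
	for (retry = 0; retry < 10; retry++) {
		if (link_up(ctx))
			return 0;
		usleep(90000);		/* LINK_WAIT_USLEEP_MIN */
	}
	return -ETIMEDOUT;
}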
+
/*
* Set PCIe address window register which could be used for memory
* mapping.
*/
-static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
- u32 win_num, u32 match_ms,
- u32 match_ls, u32 mask_ms,
- u32 mask_ls, u32 remap_ms,
- u32 remap_ls, u32 action)
-{
- advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
- advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
- advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
- advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
- advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
- advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
- advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
- advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
+ phys_addr_t match, phys_addr_t remap,
+ phys_addr_t mask, u32 actions)
+{
+ advk_writel(pcie, OB_WIN_ENABLE |
+ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
+}
+
+static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
+{
+ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
@@ -273,10 +445,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
u32 reg;
int i;
- /* Point PCIe unit MBUS decode windows to DRAM space */
- for (i = 0; i < 8; i++)
- advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);
-
/* Set to Direct mode */
reg = advk_readl(pcie, CTRL_CONFIG_REG);
reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
@@ -295,36 +463,27 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
- /* Set PCIe Device Control and Status 1 PF0 register */
- reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
- (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
- PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
- (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
- advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
+ /* Set PCIe Device Control register */
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
+ reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+ reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ reg &= ~PCI_EXP_DEVCTL_READRQ;
+ reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
+ reg |= PCI_EXP_DEVCTL_READRQ_512B;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
/* Program PCIe Control 2 to disable strict ordering */
reg = PCIE_CORE_CTRL2_RESERVED |
PCIE_CORE_CTRL2_TD_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
- /* Set GEN2 */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg &= ~PCIE_GEN_SEL_MSK;
- reg |= SPEED_GEN_2;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
/* Set lane X1 */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg &= ~LANE_CNT_MSK;
reg |= LANE_COUNT_1;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
- /* Enable link training */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg |= LINK_TRAINING_EN;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
/* Enable MSI */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
@@ -349,25 +508,52 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+ /*
+ * Enable AXI address window location generation:
+ * When it is enabled, the default outbound window
+ * configurations (Default User Field: 0xD0074CFC)
+ * are used for transparent address translation of
+ * the outbound transactions. Thus, PCIe address
+ * windows are not required for transparent memory
+ * access when the default outbound window
+ * configuration is set for memory access.
+ */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
- /* Bypass the address window mapping for PIO */
+ /*
+ * Set memory access in the Default User Field so that
+ * no PCIe address window has to be configured for
+ * transparent memory access.
+ */
+ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
+
+ /*
+ * Bypass the address window mapping for PIO:
+ * Since a PIO access already carries all required
+ * info over the AXI interface in the PIO registers,
+ * the address window is not required.
+ */
reg = advk_readl(pcie, PIO_CTRL);
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
- /* Start link training */
- reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
- reg |= PCIE_CORE_LINK_TRAINING;
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+ /*
+ * Configure PCIe address windows for non-memory or
+ * non-transparent accesses, as PCIe uses transparent
+ * memory access by default.
+ */
+ for (i = 0; i < pcie->wins_count; i++)
+ advk_pcie_set_ob_win(pcie, i,
+ pcie->wins[i].match, pcie->wins[i].remap,
+ pcie->wins[i].mask, pcie->wins[i].actions);
- advk_pcie_wait_for_link(pcie);
+ /* Disable remaining PCIe outbound windows */
+ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
- reg = PCIE_CORE_LINK_L0S_ENTRY |
- (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+ advk_pcie_train_link(pcie);
reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
@@ -376,7 +562,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}
-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -387,14 +573,49 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
status = (reg & PIO_COMPLETION_STATUS_MASK) >>
PIO_COMPLETION_STATUS_SHIFT;
- if (!status)
- return;
-
+ /*
+ * According to the HW spec, the PIO status check sequence is as below:
+ * 1) even if COMPLETION_STATUS (bits 9:7) indicates success, the
+ * Error Status (bit 11) still needs to be checked; only when that
+ * bit indicates no error is the operation successful.
+ * 2) value Unsupported Request (1) of COMPLETION_STATUS (bits 9:7)
+ * only means a PIO write error; a PIO read is successful with
+ * a read value of 0xFFFFFFFF.
+ * 3) value Configuration Request Retry Status (CRS) of
+ * COMPLETION_STATUS (bits 9:7) only means a PIO write error; a PIO
+ * read is successful with a read value of 0xFFFF0001.
+ * 4) value Completer Abort (CA) of COMPLETION_STATUS (bits 9:7) means
+ * an error for both PIO read and PIO write operations.
+ * 5) other errors are indicated as 'unknown'.
+ */
switch (status) {
+ case PIO_COMPLETION_STATUS_OK:
+ if (reg & PIO_ERR_STATUS) {
+ strcomp_status = "COMP_ERR";
+ break;
+ }
+ /* Get the read result */
+ if (val)
+ *val = advk_readl(pcie, PIO_RD_DATA);
+ /* No error */
+ strcomp_status = NULL;
+ break;
case PIO_COMPLETION_STATUS_UR:
strcomp_status = "UR";
break;
case PIO_COMPLETION_STATUS_CRS:
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is not enabled, the Root Complex
+ * must re-issue the Configuration Request as a new Request.
+ * A Root Complex implementation may choose to limit the number
+ * of Configuration Request/CRS Completion Status loops before
+ * determining that something is wrong with the target of the
+ * Request and taking appropriate action, e.g., complete the
+ * Request to the host as a failed transaction.
+ *
+ * To simplify implementation do not re-issue the Configuration
+ * Request and complete the Request as a failed transaction.
+ */
strcomp_status = "CRS";
break;
case PIO_COMPLETION_STATUS_CA:
@@ -405,35 +626,84 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
break;
}
+ if (!strcomp_status)
+ return 0;
+
if (reg & PIO_NON_POSTED_REQ)
str_posted = "Non-posted";
else
str_posted = "Posted";
- dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+ dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+
+ return -EFAULT;
}
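/*
 * The rules above, condensed into a decision table. This is a sketch of
 * what the hardware reports per completion status (bits 9:7 plus the
 * ERR bit 11); the driver itself maps every non-OK case, including UR
 * and CRS reads, to -EFAULT rather than synthesizing the read values.
 */
#include <errno.h>
#include <stdbool.h>
enum { PIO_OK = 0, PIO_UR = 1, PIO_CRS = 2, PIO_CA = 4 };
static int pio_status_meaning(unsigned int status, bool err_bit, bool is_read)
{
	switch (status) {
	case PIO_OK:
		return err_bit ? -EFAULT : 0;	/* rule 1: ERR bit overrides */
	case PIO_UR:				/* rule 2: read => 0xFFFFFFFF */
	case PIO_CRS:				/* rule 3: read => 0xFFFF0001 */
		return is_read ? 0 : -EFAULT;
	case PIO_CA:				/* rule 4: error either way */
	default:				/* rule 5: unknown */
		return -EFAULT;
	}
}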
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
+ int i;
- while (time_before(jiffies, timeout)) {
+ for (i = 0; i < PIO_RETRY_CNT; i++) {
u32 start, isr;
start = advk_readl(pcie, PIO_START);
isr = advk_readl(pcie, PIO_ISR);
if (!start && isr)
return 0;
+ udelay(PIO_RETRY_DELAY);
}
- dev_err(dev, "config read/write timed out\n");
+ dev_err(dev, "PIO read/write transfer time out\n");
return -ETIMEDOUT;
}
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+
+ /*
+ * Trying to start a new PIO transfer when the previous one has not
+ * completed yet causes an External Abort on the CPU, which results in
+ * a kernel panic:
+ *
+ * SError Interrupt on CPU0, code 0xbf000002 -- SError
+ * Kernel panic - not syncing: Asynchronous SError Interrupt
+ *
+ * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+ * by raw_spin_lock_irqsave() at the pci_lock_config() level to prevent
+ * concurrent calls at the same time. But because a PIO transfer may
+ * take about 1.5 s when the link is down or the card is disconnected,
+ * advk_pcie_wait_pio() may time out and return before the transfer
+ * has actually completed.
+ *
+ * Some versions of ARM Trusted Firmware handle this External Abort at
+ * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
+ * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+ */
+ if (advk_readl(pcie, PIO_START)) {
+ dev_err(dev, "Previous PIO read/write transfer is still running\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+ int devfn)
+{
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+ return false;
+
+ /*
+ * If the link goes down after we check for link-up, nothing bad
+ * happens but the config access times out.
+ */
+ if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
+ return false;
+
+ return true;
+}
+
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -441,14 +711,15 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;
- if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
+ if (!advk_pcie_valid_device(pcie, bus, devfn)) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie)) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -467,17 +738,23 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
- if (ret < 0)
+ if (ret < 0) {
+ *val = 0xffffffff;
return PCIBIOS_SET_FAILED;
+ }
- advk_pcie_check_pio_status(pcie);
+ /* Check PIO status and get the read result */
+ ret = advk_pcie_check_pio_status(pcie, val);
+ if (ret < 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
- /* Get the read result */
- *val = advk_readl(pcie, PIO_RD_DATA);
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
else if (size == 2)
@@ -495,15 +772,14 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int offset;
int ret;
- if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+ if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
if (where % size)
return PCIBIOS_SET_FAILED;
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie))
+ return PCIBIOS_SET_FAILED;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -530,14 +806,17 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
return PCIBIOS_SET_FAILED;
- advk_pcie_check_pio_status(pcie);
+ ret = advk_pcie_check_pio_status(pcie, NULL);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
return PCIBIOS_SUCCESSFUL;
}
@@ -588,7 +867,7 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
domain->host_data, handle_simple_irq,
NULL, NULL);
- return hwirq;
+ return 0;
}
static void advk_msi_irq_domain_free(struct irq_domain *domain,
@@ -611,22 +890,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void advk_pcie_irq_unmask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static int advk_pcie_irq_map(struct irq_domain *h,
@@ -708,6 +993,9 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
struct irq_chip *irq_chip;
+ int ret = 0;
+
+ raw_spin_lock_init(&pcie->irq_lock);
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
@@ -720,8 +1008,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
dev_name(dev));
if (!irq_chip->name) {
- of_node_put(pcie_intc_node);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put_node;
}
irq_chip->irq_mask = advk_pcie_irq_mask;
@@ -733,11 +1021,13 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
&advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put_node;
}
- return 0;
+out_put_node:
+ of_node_put(pcie_intc_node);
+ return ret;
}
static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
@@ -758,8 +1048,12 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
if (!(BIT(msi_idx) & msi_status))
continue;
+ /*
+ * msi_idx contains bits [4:0] of the msi_data, and msi_data
+ * contains the 16-bit MSI interrupt number
+ */
advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
- msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
+ msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
generic_handle_irq(msi_data);
}
@@ -781,12 +1075,6 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
- if (!isr0_status && !isr1_status) {
- advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
- advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
- return;
- }
-
/* Process MSI interrupts */
if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
advk_pcie_handle_msi(pcie);
@@ -845,13 +1133,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
switch (resource_type(res)) {
case IORESOURCE_IO:
- advk_pcie_set_ob_win(pcie, 1,
- upper_32_bits(res->start),
- lower_32_bits(res->start),
- 0, 0xF8000000, 0,
- lower_32_bits(res->start),
- OB_PCIE_IO);
- err = pci_remap_iospace(res, iobase);
+ err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
@@ -859,12 +1141,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
}
break;
case IORESOURCE_MEM:
- advk_pcie_set_ob_win(pcie, 0,
- upper_32_bits(res->start),
- lower_32_bits(res->start),
- 0x0, 0xF8000000, 0,
- lower_32_bits(res->start),
- (2 << 20) | OB_PCIE_MEM);
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
@@ -893,6 +1169,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct resource *res;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
int ret, irq;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
@@ -922,6 +1199,103 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
+ resource_list_for_each_entry(entry, &pcie->resources) {
+ resource_size_t start = entry->res->start;
+ resource_size_t size = resource_size(entry->res);
+ unsigned long type = resource_type(entry->res);
+ u64 win_size;
+
+ /*
+ * The Aardvark hardware also allows configuring PCIe windows
+ * for config type 0 and type 1 mapping, but the driver uses
+ * only PIO for issuing configuration transfers, which does
+ * not use PCIe window configuration.
+ */
+ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
+ type != IORESOURCE_IO)
+ continue;
+
+ /*
+ * Skip transparent memory resources. Default outbound access
+ * configuration is set to transparent memory access so it
+ * does not need window configuration.
+ */
+ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
+ entry->offset == 0)
+ continue;
+
+ /*
+ * The n-th PCIe window is configured by the tuple (match, remap, mask);
+ * an access to address A uses this window if A matches 'match'
+ * under the given mask.
+ * So every PCIe window size must be a power of two and every start
+ * address must be aligned to the window size. The minimal size is
+ * 64 KiB because the lower 16 bits of the mask must be zero. The
+ * remapped address may only have bits from the mask set.
+ */
+ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
+ /* Calculate the largest aligned window size */
+ win_size = (1ULL << (fls64(size)-1)) |
+ (start ? (1ULL << __ffs64(start)) : 0);
+ win_size = 1ULL << __ffs64(win_size);
+ if (win_size < 0x10000)
+ break;
+
+ dev_dbg(dev,
+ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
+ pcie->wins_count, (unsigned long long)start,
+ (unsigned long long)start + win_size, type);
+
+ if (type == IORESOURCE_IO) {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
+ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
+ } else {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
+ pcie->wins[pcie->wins_count].match = start;
+ }
+ pcie->wins[pcie->wins_count].remap = start - entry->offset;
+ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
+
+ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
+ break;
+
+ start += win_size;
+ size -= win_size;
+ pcie->wins_count++;
+ }
+
+ if (size > 0) {
+ dev_err(&pcie->pdev->dev,
+ "Invalid PCIe region [0x%llx-0x%llx]\n",
+ (unsigned long long)entry->res->start,
+ (unsigned long long)entry->res->end + 1);
+ return -EINVAL;
+ }
+ }
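/*
 * Worked example of the splitting loop above (illustrative numbers):
 *
 *   start = 0x800000, size = 0xa00000 (a 10 MiB region at 8 MiB)
 *
 * Pass 1: largest power of two <= size = 0x800000
 *         alignment of start           = 0x800000
 *         win_size = min of the two    = 0x800000
 *         -> window [0x800000, 0x1000000); start = 0x1000000, size = 0x200000
 * Pass 2: largest power of two <= size = 0x200000
 *         alignment of start           = 0x1000000
 *         win_size                     = 0x200000
 *         -> window [0x1000000, 0x1200000); size = 0, done in two windows.
 *
 * If OB_WIN_COUNT windows are exhausted, or win_size drops below 64 KiB
 * while size is still non-zero, probe fails with the -EINVAL below.
 */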
+
+ pcie->reset_gpio = devm_fwnode_get_index_gpiod_from_child(dev, "reset",
+ 0,
+ dev_fwnode(dev),
+ GPIOD_OUT_LOW,
+ "pcie1-reset");
+ ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
+ if (ret) {
+ if (ret == -ENOENT) {
+ pcie->reset_gpio = NULL;
+ } else {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset-gpio: %i\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = of_pci_get_max_link_speed(dev->of_node);
+ if (ret <= 0 || ret > 3)
+ pcie->link_gen = 3;
+ else
+ pcie->link_gen = ret;
+
advk_pcie_setup_hw(pcie);
ret = advk_pcie_init_irq_domain(pcie);
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index fc0ca03f280e..ea4d12c76cfe 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -119,7 +119,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
* the config space access window. Since we are working with
* the high-order 32 bits, shift everything down by 32 bits.
*/
- node_bits = (cfg->res.start >> 32) & (1 << 12);
+ node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
v |= node_bits;
set_val(v, where, size, val);
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 6e066f8b74df..1b133bf644bd 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -22,6 +22,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@@ -325,9 +326,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
* structure here for the BAR.
*/
bar4_start = res_pem->start + 0xf00000;
- pem_pci->ea_entry[0] = (u32)bar4_start | 2;
- pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
- pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+ pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
+ pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
cfg->priv = pem_pci;
return 0;
@@ -335,9 +336,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-#define PEM_RES_BASE 0x87e0c0000000UL
-#define PEM_NODE_MASK GENMASK(45, 44)
-#define PEM_INDX_MASK GENMASK(26, 24)
+#define PEM_RES_BASE 0x87e0c0000000ULL
+#define PEM_NODE_MASK GENMASK_ULL(45, 44)
+#define PEM_INDX_MASK GENMASK_ULL(26, 24)
#define PEM_MIN_DOM_IN_NODE 4
#define PEM_MAX_DOM_IN_NODE 10
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 1f42a202b021..784b3f61199e 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -393,13 +393,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
if (!msi_group->gic_irq)
continue;
- irq_set_chained_handler(msi_group->gic_irq,
- xgene_msi_isr);
- err = irq_set_handler_data(msi_group->gic_irq, msi_group);
- if (err) {
- pr_err("failed to register GIC IRQ handler\n");
- return -EINVAL;
- }
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ xgene_msi_isr, msi_group);
+
/*
* Statically allocate MSI GIC IRQs to each CPU core.
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 2d0f535a2f69..7e6f7b8504d2 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -217,15 +217,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
int target_cpu = cpumask_first(mask);
int curr_cpu;
+ int ret;
curr_cpu = hwirq_to_cpu(msi, data->hwirq);
if (curr_cpu == target_cpu)
- return IRQ_SET_MASK_OK_DONE;
+ ret = IRQ_SET_MASK_OK_DONE;
+ else {
+ /* steer MSI to the target CPU */
+ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ ret = IRQ_SET_MASK_OK;
+ }
- /* steer MSI to the target CPU */
- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
- return IRQ_SET_MASK_OK;
+ return ret;
}
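/*
 * A sketch of the hwirq layout the steering line above relies on
 * (semantics inferred from the helper names; not the driver's code):
 * hwirqs are allocated in strides of the CPU count, one slot per CPU.
 */
static unsigned int hwirq_to_cpu_sketch(unsigned int hwirq, unsigned int nr_cpus)
{
	return hwirq % nr_cpus;
}
static unsigned int hwirq_to_canonical_sketch(unsigned int hwirq,
					      unsigned int nr_cpus)
{
	return hwirq - hwirq_to_cpu_sketch(hwirq, nr_cpus);
}
/* Steering an MSI is then canonical slot + target CPU, as in the hunk. */
static unsigned int steer_hwirq(unsigned int hwirq, unsigned int nr_cpus,
				unsigned int target_cpu)
{
	return hwirq_to_canonical_sketch(hwirq, nr_cpus) + target_cpu;
}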
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 8f8dac0155d6..2565abbe1a91 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -306,7 +306,7 @@ enum iproc_pcie_reg {
};
/* iProc PCIe PAXB BCMA registers */
-static const u16 iproc_pcie_reg_paxb_bcma[] = {
+static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -317,7 +317,7 @@ static const u16 iproc_pcie_reg_paxb_bcma[] = {
};
/* iProc PCIe PAXB registers */
-static const u16 iproc_pcie_reg_paxb[] = {
+static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -333,7 +333,7 @@ static const u16 iproc_pcie_reg_paxb[] = {
};
/* iProc PCIe PAXB v2 registers */
-static const u16 iproc_pcie_reg_paxb_v2[] = {
+static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
@@ -361,7 +361,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
};
/* iProc PCIe PAXC v1 registers */
-static const u16 iproc_pcie_reg_paxc[] = {
+static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_CLK_CTRL] = 0x000,
[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
@@ -370,7 +370,7 @@ static const u16 iproc_pcie_reg_paxc[] = {
};
/* iProc PCIe PAXC v2 registers */
-static const u16 iproc_pcie_reg_paxc_v2[] = {
+static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
[IPROC_PCIE_MSI_GIC_MODE] = 0x050,
[IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
[IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
index c896bb9ef968..60c3110b5151 100644
--- a/drivers/pci/host/pcie-mediatek.c
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -1042,14 +1042,14 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
err = of_pci_get_devfn(child);
if (err < 0) {
dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
+ goto error_put_node;
}
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- return err;
+ goto error_put_node;
}
err = mtk_pcie_subsys_powerup(pcie);
@@ -1065,6 +1065,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
+error_put_node:
+ of_node_put(child);
+ return err;
}
static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 2b0a1f3b8265..0077afca2493 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -328,11 +328,12 @@ static struct pci_ops rcar_pcie_ops = {
};
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
- struct resource *res)
+ struct resource_entry *window)
{
/* Setup PCIe address space mappings for each resource */
resource_size_t size;
resource_size_t res_start;
+ struct resource *res = window->res;
u32 mask;
rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
@@ -346,9 +347,9 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
if (res->flags & IORESOURCE_IO)
- res_start = pci_pio_to_address(res->start);
+ res_start = pci_pio_to_address(res->start) - window->offset;
else
- res_start = res->start;
+ res_start = res->start - window->offset;
rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
@@ -377,7 +378,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
switch (resource_type(res)) {
case IORESOURCE_IO:
case IORESOURCE_MEM:
- rcar_pcie_setup_window(i, pci, res);
+ rcar_pcie_setup_window(i, pci, win);
i++;
break;
case IORESOURCE_BUS:
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 981a5195686f..6812a1b49fa8 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -10,6 +10,7 @@
* (at your option) any later version.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -171,6 +172,7 @@ struct nwl_pcie {
u8 root_busno;
struct nwl_msi msi;
struct irq_domain *legacy_irq_domain;
+ struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -852,6 +854,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ err = clk_prepare_enable(pcie->clk);
+ if (err) {
+ dev_err(dev, "can't enable PCIe ref clock\n");
+ return err;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index af6d5da10ea5..79d56638878c 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -638,9 +638,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
x86_vector_domain);
- irq_domain_free_fwnode(fn);
- if (!vmd->irq_domain)
+ if (!vmd->irq_domain) {
+ irq_domain_free_fwnode(fn);
return -ENODEV;
+ }
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource(&resources, &vmd->resources[1]);
@@ -650,6 +651,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
return -ENODEV;
}
@@ -752,6 +754,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd)
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct fwnode_handle *fn = vmd->irq_domain->fwnode;
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
@@ -760,6 +763,7 @@ static void vmd_remove(struct pci_dev *dev)
vmd_teardown_dma_ops(vmd);
vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 711875afdd70..40e936e3a480 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -136,13 +136,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
struct acpiphp_context *context;
acpi_lock_hp_context();
+
context = acpiphp_get_context(adev);
- if (!context || context->func.parent->is_going_away) {
- acpi_unlock_hp_context();
- return NULL;
+ if (!context)
+ goto unlock;
+
+ if (context->func.parent->is_going_away) {
+ acpiphp_put_context(context);
+ context = NULL;
+ goto unlock;
}
+
get_bridge(context->func.parent);
acpiphp_put_context(context);
+
+unlock:
acpi_unlock_hp_context();
return context;
}
@@ -501,6 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
continue;
}
+ pci_dev_put(dev);
}
}
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index edb5d8a53020..5b2228402f9b 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -39,12 +39,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
if (nbytes >= MAX_DRC_NAME_LEN)
return 0;
- memcpy(drc_name, buf, nbytes);
+ strscpy(drc_name, buf, nbytes + 1);
end = strchr(drc_name, '\n');
- if (!end)
- end = &drc_name[nbytes];
- *end = '\0';
+ if (end)
+ *end = '\0';
rc = dlpar_add_slot(drc_name);
if (rc)
@@ -70,12 +69,11 @@ static ssize_t remove_slot_store(struct kobject *kobj,
if (nbytes >= MAX_DRC_NAME_LEN)
return 0;
- memcpy(drc_name, buf, nbytes);
+ strscpy(drc_name, buf, nbytes + 1);
end = strchr(drc_name, '\n');
- if (!end)
- end = &drc_name[nbytes];
- *end = '\0';
+ if (end)
+ *end = '\0';
rc = dlpar_remove_slot(drc_name);
if (rc)
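Unlike the old memcpy(), strscpy() guarantees NUL-termination within the destination, so the strchr() scan can never run past drc_name even when no newline is present. A userspace approximation of its semantics (simplified; the real strscpy() returns -E2BIG on truncation):
#include <string.h>
#include <sys/types.h>
static ssize_t scpy(char *dst, const char *src, size_t size)
{
	size_t len;
	if (!size)
		return -1;	/* real strscpy(): -E2BIG */
	len = strnlen(src, size);
	if (len >= size)
		len = size - 1;	/* truncate, keep room for the NUL */
	memcpy(dst, src, len);
	dst[len] = '\0';
	return (ssize_t)len;
}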
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2a203055b16e..bbddf492da9f 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -170,24 +170,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- u32 mask_bits = desc->masked;
+ raw_spinlock_t *lock = &desc->dev->msi_lock;
+ unsigned long flags;
if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
- return 0;
+ return;
- mask_bits &= ~mask;
- mask_bits |= flag;
+ raw_spin_lock_irqsave(lock, flags);
+ desc->masked &= ~mask;
+ desc->masked |= flag;
pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
- mask_bits);
-
- return mask_bits;
+ desc->masked);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+ __pci_msi_desc_mask_irq(desc, mask, flag);
}
static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
@@ -302,10 +303,28 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
+ bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
+ /*
+ * The specification mandates that the entry is masked
+ * when the message is modified:
+ *
+ * "If software changes the Address or Data value of an
+ * entry while the entry is unmasked, the result is
+ * undefined."
+ */
+ if (unmasked)
+ __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);
writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+ if (unmasked)
+ __pci_msix_desc_mask_irq(entry, 0);
+
+ /* Ensure that the writes are visible in the device */
+ readl(base + PCI_MSIX_ENTRY_DATA);
} else {
int pos = dev->msi_cap;
u16 msgctl;
@@ -326,6 +345,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
msg->data);
}
+ /* Ensure that the writes are visible in the device */
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
}
entry->msg = *msg;
}
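/*
 * The spec language quoted above boils down to: if the vector is live,
 * mask it, update address/data, unmask, then read back to flush the
 * posted writes. A free-standing sketch against a hypothetical MMIO
 * view of one MSI-X table entry (illustrative; the real code is
 * __pci_write_msi_msg() above):
 */
#include <stdint.h>
#define MSIX_CTRL_MASKBIT 0x1u
struct msix_entry_mmio {
	volatile uint32_t addr_lo;
	volatile uint32_t addr_hi;
	volatile uint32_t data;
	volatile uint32_t ctrl;
};
static void msix_write_msg(struct msix_entry_mmio *e,
			   uint32_t lo, uint32_t hi, uint32_t data)
{
	int unmasked = !(e->ctrl & MSIX_CTRL_MASKBIT);
	if (unmasked)
		e->ctrl |= MSIX_CTRL_MASKBIT;	/* mask while modifying */
	e->addr_lo = lo;
	e->addr_hi = hi;
	e->data = data;
	if (unmasked)
		e->ctrl &= ~MSIX_CTRL_MASKBIT;	/* restore unmasked state */
	(void)e->data;	/* read back so the writes reach the device */
}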
@@ -351,18 +372,6 @@ static void free_msi_irqs(struct pci_dev *dev)
for (i = 0; i < entry->nvec_used; i++)
BUG_ON(irq_has_action(entry->irq + i));
- pci_msi_teardown_msi_irqs(dev);
-
- list_for_each_entry_safe(entry, tmp, msi_list, list) {
- if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, msi_list))
- iounmap(entry->mask_base);
- }
-
- list_del(&entry->list);
- free_msi_entry(entry);
- }
-
if (dev->msi_irq_groups) {
sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
msi_attrs = dev->msi_irq_groups[0]->attrs;
@@ -378,6 +387,18 @@ static void free_msi_irqs(struct pci_dev *dev)
kfree(dev->msi_irq_groups);
dev->msi_irq_groups = NULL;
}
+
+ pci_msi_teardown_msi_irqs(dev);
+
+ list_for_each_entry_safe(entry, tmp, msi_list, list) {
+ if (entry->msi_attrib.is_msix) {
+ if (list_is_last(&entry->list, msi_list))
+ iounmap(entry->mask_base);
+ }
+
+ list_del(&entry->list);
+ free_msi_entry(entry);
+ }
}
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -619,21 +640,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
ret = msi_verify_entries(dev);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
ret = populate_msi_sysfs(dev);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
@@ -674,6 +695,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
{
struct cpumask *curmsk, *masks = NULL;
struct msi_desc *entry;
+ void __iomem *addr;
int ret, i;
if (affd)
@@ -693,6 +715,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;
+
if (entries)
entry->msi_attrib.entry_nr = entries[i].entry;
else
@@ -700,6 +723,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
+ addr = pci_msix_desc_addr(entry);
+ if (addr)
+ entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
if (masks)
curmsk++;
@@ -710,21 +737,30 @@ out:
return ret;
}
-static void msix_program_entries(struct pci_dev *dev,
- struct msix_entry *entries)
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
struct msi_desc *entry;
- int i = 0;
for_each_pci_msi_entry(entry, dev) {
- if (entries)
- entries[i++].vector = entry->irq;
- entry->masked = readl(pci_msix_desc_addr(entry) +
- PCI_MSIX_ENTRY_VECTOR_CTRL);
- msix_mask_irq(entry, 1);
+ if (entries) {
+ entries->vector = entry->irq;
+ entries++;
+ }
}
}
+static void msix_mask_all(void __iomem *base, int tsize)
+{
+ u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ int i;
+
+ if (pci_msi_ignore_mask)
+ return;
+
+ for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+ writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
+
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -739,22 +775,30 @@ static void msix_program_entries(struct pci_dev *dev,
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, const struct irq_affinity *affd)
{
- int ret;
- u16 control;
void __iomem *base;
+ int ret, tsize;
+ u16 control;
- /* Ensure MSI-X is disabled while it is set up */
- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ /*
+ * Some devices require MSI-X to be enabled before the MSI-X
+ * registers can be accessed. Mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+ PCI_MSIX_FLAGS_ENABLE);
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
- base = msix_map_region(dev, msix_table_size(control));
- if (!base)
- return -ENOMEM;
+ tsize = msix_table_size(control);
+ base = msix_map_region(dev, tsize);
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_disable;
+ }
ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
- return ret;
+ goto out_disable;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
@@ -765,15 +809,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (ret)
goto out_free;
- /*
- * Some devices require MSI-X to be enabled before we can touch the
- * MSI-X registers. We need to mask all the vectors to prevent
- * interrupts coming in before they're fully set up.
- */
- pci_msix_clear_and_set_ctrl(dev, 0,
- PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
-
- msix_program_entries(dev, entries);
+ msix_update_entries(dev, entries);
ret = populate_msi_sysfs(dev);
if (ret)
@@ -782,6 +818,16 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
+
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVMe device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled and thereby prevents MSI delivery.
+ */
+ msix_mask_all(base, tsize);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
@@ -807,6 +853,9 @@ out_avail:
out_free:
free_msi_irqs(dev);
+out_disable:
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
+
return ret;
}
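
For context, this initialization path is what runs when a driver asks for MSI-X vectors through pci_alloc_irq_vectors() with PCI_IRQ_MSIX. A minimal sketch of such a caller (the foo_* names are illustrative, not part of this patch):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev)
{
        int nvec;

        /* Ask for 1..8 MSI-X vectors; the core runs msix_capability_init() */
        nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;

        /* pci_irq_vector() maps vector index 0 to a Linux IRQ number */
        return request_irq(pci_irq_vector(pdev, 0), foo_handler, 0,
                           "foo", pdev);
}
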
@@ -894,8 +943,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
/* Return the device with MSI unmasked as its initial state */
mask = msi_mask(desc->msi_attrib.multi_cap);
- /* Keep cached state to be restored */
- __pci_msi_desc_mask_irq(desc, mask, ~mask);
+ msi_mask_irq(desc, mask, 0);
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
@@ -980,10 +1028,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
}
/* Return the device with MSI-X masked as its initial state */
- for_each_pci_msi_entry(entry, dev) {
- /* Keep cached states to be restored */
+ for_each_pci_msi_entry(entry, dev)
__pci_msix_desc_mask_irq(entry, 1);
- }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a3cedf8de863..fa44e1506357 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -573,7 +573,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
if (acpi_pm_device_can_wakeup(&bus->self->dev))
- return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
+ return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
bus = bus->parent;
}
@@ -581,7 +581,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
/* We have reached the root bus. */
if (bus->bridge) {
if (acpi_pm_device_can_wakeup(bus->bridge))
- return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
+ return acpi_pm_set_device_wakeup(bus->bridge, enable);
}
return 0;
}
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index a961a71d950f..6beafc1bee96 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -161,7 +161,7 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
obj->buffer.length,
UTF16_LITTLE_ENDIAN,
- buf, PAGE_SIZE);
+ buf, PAGE_SIZE - 1);
buf[len] = '\n';
}
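
The off-by-one being fixed above: utf16s_to_utf8s() returns the number of bytes written, and the caller then appends a newline at buf[len]. A sketch of the corrected bound, assuming a page-sized sysfs buffer as in the original code:

#include <linux/mm.h>
#include <linux/nls.h>

/*
 * Illustrative only: with a bound of PAGE_SIZE the conversion may fill
 * the whole buffer, len can equal PAGE_SIZE, and the newline store
 * would land one byte past the end. PAGE_SIZE - 1 keeps room for it.
 */
static void demo_label_to_buf(const wchar_t *src, int src_len, char *buf)
{
        int len = utf16s_to_utf8s(src, src_len, UTF16_LITTLE_ENDIAN,
                                  buf, PAGE_SIZE - 1);

        buf[len] = '\n';
}
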
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c847b5554db6..4ff7f2575d28 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1384,11 +1384,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
* so that things like MSI message writing will behave as expected
* (e.g. if the device really is in D0 at enable time).
*/
- if (dev->pm_cap) {
- u16 pmcsr;
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
- }
+ pci_update_current_state(dev, dev->current_state);
if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
@@ -1950,7 +1946,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
if (enable) {
int error;
- if (pci_pme_capable(dev, state))
+ /*
+ * Enable PME signaling if the device can signal PME from
+ * D3cold regardless of whether or not it can signal PME from
+ * the current target state, because that will allow it to
+ * signal PME when the hierarchy above it goes into D3cold and
+ * the device itself ends up in D3cold as a result of that.
+ */
+ if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
pci_pme_active(dev, true);
else
ret = 1;
@@ -2054,17 +2057,21 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
if (dev->current_state == PCI_D3cold)
target_state = PCI_D3cold;
- if (wakeup) {
+ if (wakeup && dev->pme_support) {
+ pci_power_t state = target_state;
+
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
* to generate PME#.
*/
- if (dev->pme_support) {
- while (target_state
- && !(dev->pme_support & (1 << target_state)))
- target_state--;
- }
+ while (state && !(dev->pme_support & (1 << state)))
+ state--;
+
+ if (state)
+ return state;
+ else if (dev->pme_support & 1)
+ return PCI_D0;
}
return target_state;
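
Read on its own, the reworked search walks down from the target state until it finds one the device can signal PME# from, falling back to D0 when only bit 0 of pme_support is set. A standalone sketch under the same encoding (bit n set = PME# possible from Dn):

#include <linux/pci.h>

/*
 * pme_support: bit n set means the device can signal PME# from Dn
 * (D0 = 0 ... D3cold = 4), mirroring struct pci_dev's 5-bit field.
 */
static pci_power_t deepest_pme_state(u8 pme_support, pci_power_t target)
{
        pci_power_t state = target;

        while (state && !(pme_support & (1 << state)))
                state--;

        if (state)
                return state;
        if (pme_support & 1)            /* only PME from D0 */
                return PCI_D0;
        return target;                  /* no usable PME state */
}
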
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fdb02c1f94bb..9f5215e25df4 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -365,6 +365,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
+#else
+static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
+ u16 segment, struct resource *res)
+{
+ return -ENODEV;
+}
#endif
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 6b4e82a4b64e..f41c105adfbd 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -80,6 +80,7 @@ struct pcie_link_state {
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ u32 clkpm_disable:1; /* Clock PM disabled */
/* Exit latencies */
struct aspm_latency latency_up; /* Upstream direction exit latency */
@@ -177,8 +178,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- /* Don't enable Clock PM if the link is not Clock PM capable */
- if (!link->clkpm_capable)
+ /*
+ * Don't enable Clock PM if the link is not Clock PM capable
+ * or Clock PM is disabled
+ */
+ if (!link->clkpm_capable || link->clkpm_disable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
@@ -208,7 +212,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- link->clkpm_capable = (blacklist) ? 0 : capable;
+ link->clkpm_capable = capable;
+ link->clkpm_disable = blacklist ? 1 : 0;
}
static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -579,16 +584,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
- /*
- * If the downstream component has pci bridge function, don't
- * do ASPM for now.
- */
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
- link->aspm_disable = ASPM_STATE_ALL;
- break;
- }
- }
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
@@ -693,9 +688,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
/* Enable what we need to enable */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
@@ -1052,10 +1047,9 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
link->aspm_disable |= ASPM_STATE_L1;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
- if (state & PCIE_LINK_STATE_CLKPM) {
- link->clkpm_capable = 0;
- pcie_set_clkpm(link, 0);
- }
+ if (state & PCIE_LINK_STATE_CLKPM)
+ link->clkpm_disable = 1;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
@@ -1117,6 +1111,7 @@ static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp)
cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
else
cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
+ cnt += sprintf(buffer + cnt, "\n");
return cnt;
}
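
With clkpm_capable and clkpm_disable now separate bits, a driver that turns Clock PM off via pci_disable_link_state() no longer destroys the capability information. A hypothetical caller (not part of this patch):

#include <linux/pci.h>
#include <linux/pci-aspm.h>

static void foo_keep_link_out_of_clkpm(struct pci_dev *pdev)
{
        /* Sets clkpm_disable; clkpm_capable still reports the hardware */
        pci_disable_link_state(pdev, PCIE_LINK_STATE_CLKPM);
}
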
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 3008bba360f3..ec6f6213960b 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -47,10 +47,6 @@ void pci_ptm_init(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return;
-
/*
* Enable PTM only on interior devices (root ports, switch ports,
* etc.) on the assumption that it causes no link traffic until an
@@ -60,6 +56,23 @@ void pci_ptm_init(struct pci_dev *dev)
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
return;
+ /*
+ * Switch Downstream Ports are not permitted to have a PTM
+ * capability; their PTM behavior is controlled by the Upstream
+ * Port (PCIe r5.0, sec 7.9.16).
+ */
+ ups = pci_upstream_bridge(dev);
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
+ ups && ups->ptm_enabled) {
+ dev->ptm_granularity = ups->ptm_granularity;
+ dev->ptm_enabled = 1;
+ return;
+ }
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return;
+
pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
@@ -69,7 +82,6 @@ void pci_ptm_init(struct pci_dev *dev)
* the spec recommendation (PCIe r3.1, sec 7.32.3), select the
* furthest upstream Time Source as the PTM Root.
*/
- ups = pci_upstream_bridge(dev);
if (ups && ups->ptm_enabled) {
ctrl = PCI_PTM_CTRL_ENABLE;
if (ups->ptm_granularity == 0)
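
The granularity a Downstream Port now inherits from its Upstream Port is the same 8-bit, nanosecond-unit field that the local decode reads from the PTM Capability register. A minimal sketch of that decode, assuming the register layout used here:

#include <linux/pci.h>

static u8 ptm_local_clock_granularity(struct pci_dev *dev, int pos)
{
        u32 cap;

        /* Bits 15:8 of the PTM Capability register, in nanoseconds */
        pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
        return (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
}
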
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e23bfd9845b1..e716d8bba227 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -792,9 +792,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
goto free;
err = device_register(&bridge->dev);
- if (err)
+ if (err) {
put_device(&bridge->dev);
-
+ goto free;
+ }
bus->bridge = get_device(&bridge->dev);
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
@@ -1446,7 +1447,7 @@ int pci_setup_device(struct pci_dev *dev)
/* device class may be changed after fixup */
class = dev->class >> 8;
- if (dev->non_compliant_bars) {
+ if (dev->non_compliant_bars && !dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
@@ -1557,13 +1558,33 @@ static void pci_configure_mps(struct pci_dev *dev)
struct pci_dev *bridge = pci_upstream_bridge(dev);
int mps, p_mps, rc;
- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ if (!pci_is_pcie(dev))
return;
/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
if (dev->is_virtfn)
return;
+ /*
+ * For Root Complex Integrated Endpoints, program the maximum
+ * supported value unless limited by the PCIE_BUS_PEER2PEER case.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ mps = 128;
+ else
+ mps = 128 << dev->pcie_mpss;
+ rc = pcie_set_mps(dev, mps);
+ if (rc) {
+ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps);
+ }
+ return;
+ }
+
+ if (!bridge || !pci_is_pcie(bridge))
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
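
The RCiEP branch relies on the MPS encoding being a power of two, 128 << pcie_mpss, so e.g. pcie_mpss == 2 yields 512 bytes. The same computation as a standalone sketch:

#include <linux/pci.h>

static int rciep_target_mps(struct pci_dev *dev, bool peer2peer)
{
        /* 128 bytes is the conservative value used for peer-to-peer */
        if (peer2peer)
                return 128;

        /* MPS is a power-of-two encoding: 128 << MPSS, i.e. 128..4096 */
        return 128 << dev->pcie_mpss;
}
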
@@ -1993,6 +2014,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
pci_set_of_node(dev);
if (pci_setup_device(dev)) {
+ pci_release_of_node(dev);
pci_bus_put(dev->bus);
kfree(dev);
return NULL;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e7ed051ec125..1bfc24654b58 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -27,6 +27,7 @@
#include <linux/ktime.h>
#include <linux/mm.h>
#include <linux/platform_data/x86/apple.h>
+#include <linux/pm_runtime.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -2085,6 +2086,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
+{
+ pci_info(dev, "Disabling ASPM L0s/L1\n");
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+}
+
+/*
+ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
+ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
+ * disable both L0s and L1 for now to be safe.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
* Link bit cleared after starting the link retrain process to allow this
@@ -3020,12 +3034,13 @@ static void fixup_mpss_256(struct pci_dev *dev)
{
dev->pcie_mpss = 1; /* 256 bytes */
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
/* Intel 5000 and 5100 Memory controllers have an errata with read completion
* coalescing (which is enabled by default on some BIOSes) and MPS of 256B.
@@ -3376,6 +3391,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
}
/*
+ * Some NVIDIA GPU devices do not work with bus reset; SBR must be
+ * prevented for the affected devices.
+ */
+static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
+{
+ if ((dev->device & 0xffc0) == 0x2340)
+ quirk_no_bus_reset(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ quirk_nvidia_no_bus_reset);
+
+/*
* Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
* The device will throw a Link Down error on AER-capable systems and
* regardless of AER, config space of the device is never accessible again
@@ -3387,6 +3414,17 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset);
+
+/*
+ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
+ * automatically disables LTSSM when Secondary Bus Reset is received and
+ * the device stops working. Prevent bus reset for these devices. With
+ * this change, the device can be assigned to VMs with VFIO, but it will
+ * leak state between VMs. Reference
+ * https://e2e.ti.com/support/processors/f/791/t/954382
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
@@ -3839,6 +3877,69 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
+#define PCI_DEVICE_ID_HINIC_VF 0x375E
+#define HINIC_VF_FLR_TYPE 0x1000
+#define HINIC_VF_FLR_CAP_BIT (1UL << 30)
+#define HINIC_VF_OP 0xE80
+#define HINIC_VF_FLR_PROC_BIT (1UL << 18)
+#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
+
+/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
+static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+{
+ unsigned long timeout;
+ void __iomem *bar;
+ u32 val;
+
+ if (probe)
+ return 0;
+
+ bar = pci_iomap(pdev, 0, 0);
+ if (!bar)
+ return -ENOTTY;
+
+ /* Get and check firmware capabilities */
+ val = ioread32be(bar + HINIC_VF_FLR_TYPE);
+ if (!(val & HINIC_VF_FLR_CAP_BIT)) {
+ pci_iounmap(pdev, bar);
+ return -ENOTTY;
+ }
+
+ /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
+ val = ioread32be(bar + HINIC_VF_OP);
+ val |= HINIC_VF_FLR_PROC_BIT;
+ iowrite32be(val, bar + HINIC_VF_OP);
+
+ pcie_flr(pdev);
+
+ /*
+ * The device must recapture its Bus and Device Numbers after FLR
+ * in order to generate Completions. Issue a config write to let the
+ * device capture this information.
+ */
+ pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
+
+ /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
+ timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
+ do {
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+ msleep(20);
+ } while (time_before(jiffies, timeout));
+
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+
+ pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
+
+reset_complete:
+ pci_iounmap(pdev, bar);
+
+ return 0;
+}
+
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
@@ -3848,6 +3949,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
+ { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+ reset_hinic_vf_dev },
{ 0 }
};
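
The { 0 } entry is a sentinel: pci_dev_specific_reset() walks the table until the first vendor/device match, treating PCI_ANY_ID as a wildcard, and a NULL reset hook ends the walk. A sketch of the dispatcher's shape (not a verbatim copy):

static int demo_dev_specific_reset(struct pci_dev *dev, int probe)
{
        const struct pci_dev_reset_methods *i;

        for (i = pci_dev_reset_methods; i->reset; i++) {
                if ((i->vendor == dev->vendor ||
                     i->vendor == (u16)PCI_ANY_ID) &&
                    (i->device == dev->device ||
                     i->device == (u16)PCI_ANY_ID))
                        return i->reset(dev, probe);
        }

        return -ENOTTY;
}
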
@@ -3920,6 +4023,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
quirk_dma_func1_alias);
@@ -4236,6 +4342,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
quirk_chelsio_T5_disable_root_port_attributes);
/*
+ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
+ * by a device
+ * @acs_ctrl_req: Bitmask of desired ACS controls
+ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
+ * the hardware design
+ *
+ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
+ * in @acs_ctrl_ena, i.e., the device provides all the access controls the
+ * caller desires. Return 0 otherwise.
+ */
+static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
+{
+ if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
+ return 1;
+ return 0;
+}
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
@@ -4275,10 +4399,12 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
if (ACPI_FAILURE(status))
return -ENODEV;
+ acpi_put_table(header);
+
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
- return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
return -ENODEV;
#endif
@@ -4305,20 +4431,19 @@ static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
+ if (!pci_quirk_cavium_acs_match(dev))
+ return -ENOTTY;
+
/*
- * Cavium root ports don't advertise an ACS capability. However,
+ * Cavium Root Ports don't advertise an ACS capability. However,
* the RTL internally implements similar protection as if ACS had
- * Request Redirection, Completion Redirection, Source Validation,
+ * Source Validation, Request Redirection, Completion Redirection,
* and Upstream Forwarding features enabled. Assert that the
* hardware implements and enables equivalent ACS functionality for
* these flags.
*/
- acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
-
- if (!pci_quirk_cavium_acs_match(dev))
- return -ENOTTY;
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4328,13 +4453,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
- * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
* into that category as provided by Intel in Red Hat bugzilla 1037684.
@@ -4382,37 +4506,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
return false;
}
-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
-
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
- INTEL_PCH_ACS_FLAGS : 0;
-
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
- return acs_flags & ~flags ? 0 : 1;
+ if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+
+ return pci_acs_ctrl_enabled(acs_flags, 0);
}
/*
- * These QCOM root ports do provide ACS-like features to disable peer
+ * These QCOM Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. Hardware supports source validation but it
* will report the issue as Completer Abort instead of ACS Violation.
- * Hardware doesn't support peer-to-peer and each root port is a root
- * complex with unique segment numbers. It is not possible for one root
- * port to pass traffic to another root port. All PCIe transactions are
- * terminated inside the root port.
+ * Hardware doesn't support peer-to-peer and each Root Port is a Root
+ * Complex with unique segment numbers. It is not possible for one Root
+ * Port to pass traffic to another Root Port. All PCIe transactions are
+ * terminated inside the Root Port.
*/
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
- int ret = acs_flags & ~flags ? 0 : 1;
-
- dev_info(&dev->dev, "Using QCOM ACS Quirk (%d)\n", ret);
-
- return ret;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
@@ -4495,7 +4614,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
- return acs_flags & ~ctrl ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4509,10 +4628,35 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
* perform peer-to-peer with other functions, allowing us to mask out
* these bits as if they were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+}
+
+static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Intel RCiEPs are required to allow p2p only on translated
+ * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16,
+ * "Root-Complex Peer to Peer Considerations".
+ */
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
+ return -ENOTTY;
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
+static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * iProc PAXB Root Ports don't advertise an ACS capability, but they
+ * also do not allow peer-to-peer transactions between Root Ports.
+ * Allow each Root Port to be in a separate IOMMU group by masking
+ * SV/RR/CR/UF bits.
+ */
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static const struct pci_dev_acs_enabled {
@@ -4585,6 +4729,7 @@ static const struct pci_dev_acs_enabled {
/* I219 */
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
/* QCOM QDF2xxx root ports */
{ 0x17cb, 0x400, pci_quirk_qcom_rp_acs },
{ 0x17cb, 0x401, pci_quirk_qcom_rp_acs },
@@ -4595,11 +4740,38 @@ static const struct pci_dev_acs_enabled {
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
+ /* Cavium multi-function devices */
+ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
/* APM X-Gene */
{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
+ /* Ampere Computing */
+ { PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
+ { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+ /* Broadcom multi-function device */
+ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
{ 0 }
};
+/*
+ * pci_dev_specific_acs_enabled - check whether device provides ACS controls
+ * @dev: PCI device
+ * @acs_flags: Bitmask of desired ACS controls
+ *
+ * Returns:
+ * -ENOTTY: No quirk applies to this device; we can't tell whether the
+ * device provides the desired controls
+ * 0: Device does not provide all the desired controls
+ * >0: Device provides all the controls in @acs_flags
+ */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
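
A worked example of the pci_acs_ctrl_enabled() contract: every requested bit must already be present in the enabled set. The sketch below only compiles next to the static helper in this file:

static int demo_acs_checks(void)
{
        u16 ena = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

        /* Every requested bit is in the enabled set -> returns 1 */
        if (!pci_acs_ctrl_enabled(PCI_ACS_SV | PCI_ACS_RR, ena))
                return -EINVAL;

        /* PCI_ACS_DT is not among the enabled bits -> returns 0 */
        return pci_acs_ctrl_enabled(PCI_ACS_DT, ena);
}
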
@@ -4869,13 +5041,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
-/* FLR may cause some 82579 devices to hang. */
-static void quirk_intel_no_flr(struct pci_dev *dev)
+/*
+ * FLR may cause the following devices to hang:
+ *
+ * AMD Starship/Matisse HD Audio Controller 0x1487
+ * AMD Starship USB 3.0 Host Controller 0x148c
+ * AMD Matisse USB 3.0 Host Controller 0x149c
+ * Intel 82579LM Gigabit Ethernet Controller 0x1502
+ * Intel 82579V Gigabit Ethernet Controller 0x1503
+ */
+static void quirk_no_flr(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
@@ -4912,3 +5096,63 @@ static void quirk_no_ats(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
#endif /* CONFIG_PCI_ATS */
+
+/* Freescale PCIe doesn't support MSI in RC mode */
+static void quirk_fsl_no_msi(struct pci_dev *pdev)
+{
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
+
+/*
+ * Although not allowed by the spec, some multi-function devices have
+ * dependencies of one function (consumer) on another (supplier). For the
+ * consumer to work in D0, the supplier must also be in D0. Create a
+ * device link from the consumer to the supplier to enforce this
+ * dependency. Runtime PM is allowed by default on the consumer to prevent
+ * it from permanently keeping the supplier awake.
+ */
+static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
+ unsigned int supplier, unsigned int class,
+ unsigned int class_shift)
+{
+ struct pci_dev *supplier_pdev;
+
+ if (PCI_FUNC(pdev->devfn) != consumer)
+ return;
+
+ supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
+ if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
+ pci_dev_put(supplier_pdev);
+ return;
+ }
+
+ if (device_link_add(&pdev->dev, &supplier_pdev->dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
+ pci_info(pdev, "D0 power state depends on %s\n",
+ pci_name(supplier_pdev));
+ else
+ pci_err(pdev, "Cannot enforce power dependency on %s\n",
+ pci_name(supplier_pdev));
+
+ pm_runtime_allow(&pdev->dev);
+ pci_dev_put(supplier_pdev);
+}
+
+/*
+ * Create device link for GPUs with integrated HDA controller for streaming
+ * audio to attached displays.
+ */
+static void quirk_gpu_hda(struct pci_dev *hda)
+{
+ pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
+ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
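
The helper generalizes to other consumer/supplier function pairs. A hypothetical quirk (illustrative only, not part of this patch) that would tie a GPU's function-2 USB controller to its function-0 display controller:

static void quirk_gpu_usb(struct pci_dev *usb)
{
        /* Function 2 (consumer) depends on the function-0 display
         * controller (supplier); the class match is on the top byte.
         */
        pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
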
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index e42909524dee..e5c3a27eaea2 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -303,16 +303,19 @@ placeholder:
slot_name = make_slot_name(name);
if (!slot_name) {
err = -ENOMEM;
+ kfree(slot);
goto err;
}
+ INIT_LIST_HEAD(&slot->list);
+ list_add(&slot->list, &parent->slots);
+
err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
"%s", slot_name);
- if (err)
+ if (err) {
+ kobject_put(&slot->kobj);
goto err;
-
- INIT_LIST_HEAD(&slot->list);
- list_add(&slot->list, &parent->slots);
+ }
down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
@@ -328,7 +331,6 @@ out:
mutex_unlock(&pci_slot_mutex);
return slot;
err:
- kfree(slot);
slot = ERR_PTR(err);
goto out;
}
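
The slot.c rework follows the kobject rule that once kobject_init_and_add() has been called, errors must be unwound with kobject_put() (so the ktype's release runs) rather than kfree(). The general pattern, as a sketch with demo_* names:

#include <linux/kobject.h>
#include <linux/slab.h>

static struct kobj_type demo_ktype;     /* .release would free the object */

static int demo_create(struct kobject *parent)
{
        struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
        int err;

        if (!kobj)
                return -ENOMEM;

        err = kobject_init_and_add(kobj, &demo_ktype, parent, "demo");
        if (err)
                kobject_put(kobj);      /* not kfree(): release() cleans up */
        return err;
}
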
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index bf229b442e72..6ef0d4b756f0 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -412,7 +412,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
kref_get(&stuser->kref);
stuser->read_len = sizeof(stuser->data);
stuser_set_state(stuser, MRPC_QUEUED);
- init_completion(&stuser->comp);
+ reinit_completion(&stuser->comp);
list_add_tail(&stuser->list, &stdev->mrpc_queue);
mrpc_cmd_submit(stdev);
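
The switchtec change matches the completion API contract: init_completion() is for first-time setup of the object (including its wait queue), while a completion reused across requests is re-armed with reinit_completion(), which only resets the done count. A sketch of the intended split:

#include <linux/completion.h>

struct demo_req {
        struct completion comp;
};

static void demo_req_alloc_init(struct demo_req *req)
{
        init_completion(&req->comp);    /* once, when the object is created */
}

static void demo_req_resubmit(struct demo_req *req)
{
        reinit_completion(&req->comp);  /* on every reuse before waiting */
}
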
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 83efa001c2e7..18345d4643d7 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -22,10 +22,12 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
u16 word;
u32 dword;
long err;
- long cfg_ret;
+ int cfg_ret;
+ err = -EPERM;
+ dev = NULL;
if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ goto error;
err = -ENODEV;
dev = pci_get_bus_and_slot(bus, dfn);
@@ -48,7 +50,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
}
err = -EIO;
- if (cfg_ret != PCIBIOS_SUCCESSFUL)
+ if (cfg_ret)
goto error;
switch (len) {
@@ -106,7 +108,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_byte(dev, off, byte);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
@@ -115,7 +117,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_word(dev, off, word);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;
@@ -124,7 +126,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
if (err)
break;
err = pci_user_write_config_dword(dev, off, dword);
- if (err != PCIBIOS_SUCCESSFUL)
+ if (err)
err = -EIO;
break;