// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (legacy
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 */
struct cdns_pcie_ep {
	struct cdns_pcie	pcie;
	u32			max_regions;
	unsigned long		ob_region_map;
	phys_addr_t		*ob_addr;
	phys_addr_t		irq_phys_addr;
	void __iomem		*irq_cpu_addr;
	u64			irq_pci_addr;
	u8			irq_pci_fn;
	u8			irq_pending;
};

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0; all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64-bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
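	/*
	 * Worked example: a 1 MiB BAR request gives sz = 1 << 20, hence
	 * aperture = 20 - 7 = 13; a 100-byte request is first clamped to
	 * the 128-byte CDNS_PCIE_EP_MIN_APERTURE, then rounded up to 128,
	 * yielding aperture 0.
	 */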

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
	cdns_pcie_writel(pcie, reg, cfg);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	/* ob_region_map is a single unsigned long: BITS_PER_LONG bits. */
	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
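	/*
	 * The MMC field occupies bits [3:1] of Message Control
	 * (PCI_MSI_FLAGS_QMASK), hence the shift by 1 below; it encodes
	 * log2 of the vector count, e.g. mmc = 3 advertises 1 << 3 = 8
	 * MSI vectors to the host.
	 */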
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
				     u8 intx, bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
	 * from drivers/pci/dwc/pci-dra7xx.c
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs. */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address to which the data must be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
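		/*
		 * Only remap when the 256-byte window around pci_addr
		 * (pci_addr_mask + 1 bytes) or the requesting function
		 * differs from the current mapping; back-to-back MSIs
		 * hitting the same window reuse the region as-is.
		 */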
		cdns_pcie_set_outbound_region(pcie, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf *epf;
	u32 cfg;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);

	/*
	 * The PCIe links are automatically established by the controller
	 * once and for all at power-up: software can neither start nor stop
	 * those links later at runtime.
	 *
	 * We therefore only have to notify the EP core that our links are
	 * already established. However, we don't call pci_epc_linkup()
	 * directly because we've already locked epc->lock.
	 */
	list_for_each_entry(epf, &epc->pci_epf, list)
		pci_epf_linkup(epf);

	return 0;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.start		= cdns_pcie_ep_start,
};

static const struct of_device_id cdns_pcie_ep_of_match[] = {
	{ .compatible = "cdns,cdns-pcie-ep" },
	{ },
};

static int cdns_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cdns_pcie_ep *ep;
	struct cdns_pcie *pcie;
	struct pci_epc *epc;
	struct resource *res;
	int ret;
	int phy_count;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->is_rc = false;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
	pcie->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = of_property_read_u32(np, "cdns,max-outbound-regions",
				   &ep->max_regions);
	if (ret < 0) {
		dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
		return ret;
	}
	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	ret = cdns_pcie_init_phy(dev, pcie);
	if (ret) {
		dev_err(dev, "failed to init phy\n");
		return ret;
	}
	platform_set_drvdata(pdev, pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		goto err_get_sync;
	}

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
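	/*
	 * CDNS_PCIE_LM_EP_FUNC_CFG holds one enable bit per physical
	 * function; cdns_pcie_ep_start() later ORs in a bit for every
	 * endpoint function bound to this EPC before notifying link-up.
	 */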
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		ret = PTR_ERR(epc);
		goto err_init;
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res));
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		goto err_init;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs. */
	set_bit(0, &ep->ob_region_map);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);
	cdns_pcie_disable_phy(pcie);
	phy_count = pcie->phy_count;
	while (phy_count--)
		device_link_del(pcie->link[phy_count]);

	return ret;
}

static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);

	cdns_pcie_disable_phy(pcie);
}

static struct platform_driver cdns_pcie_ep_driver = {
	.driver = {
		.name = "cdns-pcie-ep",
		.of_match_table = cdns_pcie_ep_of_match,
		.pm = &cdns_pcie_pm_ops,
	},
	.probe = cdns_pcie_ep_probe,
	.shutdown = cdns_pcie_ep_shutdown,
};
builtin_platform_driver(cdns_pcie_ep_driver);
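
/*
 * Illustrative device-tree node for this driver, a sketch only: the node
 * name, addresses, and sizes below are made-up placeholders, while the
 * compatible string and property names match what cdns_pcie_ep_of_match
 * and cdns_pcie_ep_probe() actually look up.
 *
 *	pcie-ep@fc000000 {
 *		compatible = "cdns,cdns-pcie-ep";
 *		reg = <0x0 0xfc000000 0x0 0x01000000>,
 *		      <0x0 0x80000000 0x0 0x40000000>;
 *		reg-names = "reg", "mem";
 *		cdns,max-outbound-regions = <16>;
 *		max-functions = \/bits\/ 8 <8>;
 *	};
 */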