linux-sg2042/drivers/pci/dwc/pcie-designware.c

/*
 * Synopsys Designware PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include "pcie-designware.h"
/* PCIe Port Logic registers */
#define PLR_OFFSET 0x700
#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
static struct pci_ops dw_pcie_ops;
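/*
 * Generic configuration space accessors used throughout the driver.
 * Accesses must be naturally aligned to their size; 4-, 2- and 1-byte
 * transactions map to readl/readw/readb (or the write equivalents),
 * anything else is rejected as PCIBIOS_BAD_REGISTER_NUMBER.
 */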
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
if ((uintptr_t)addr & (size - 1)) {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
if (size == 4)
*val = readl(addr);
else if (size == 2)
*val = readw(addr);
else if (size == 1)
*val = readb(addr);
else {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
return PCIBIOS_SUCCESSFUL;
}
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
if ((uintptr_t)addr & (size - 1))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (size == 4)
writel(val, addr);
else if (size == 2)
writew(val, addr);
else if (size == 1)
writeb(val, addr);
else
return PCIBIOS_BAD_REGISTER_NUMBER;
return PCIBIOS_SUCCESSFUL;
}
u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
if (pp->ops->readl_rc)
return pp->ops->readl_rc(pp, reg);
return readl(pp->dbi_base + reg);
}
void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
if (pp->ops->writel_rc)
pp->ops->writel_rc(pp, reg, val);
else
writel(val, pp->dbi_base + reg);
}
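/*
 * iATU "unroll" accessors: on controllers with unrolled iATU space each
 * outbound region is memory mapped at a fixed offset (computed by
 * PCIE_GET_ATU_OUTB_UNR_REG_OFFSET()) instead of being selected through
 * the viewport register.
 */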
static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
return dw_pcie_readl_rc(pp, offset + reg);
}
static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
u32 val)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
dw_pcie_writel_rc(pp, offset + reg, val);
}
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
if (pp->ops->rd_own_conf)
return pp->ops->rd_own_conf(pp, where, size, val);
return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
}
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
if (pp->ops->wr_own_conf)
return pp->ops->wr_own_conf(pp, where, size, val);
return dw_pcie_cfg_write(pp->dbi_base + where, size, val);
}
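/*
 * Program one outbound iATU region so that CPU accesses in the window
 * [cpu_addr, cpu_addr + size) are translated to pci_addr with the given
 * TLP type (MEM, IO, CFG0 or CFG1), then poll the region control
 * register until the enable bit is seen to take effect.
 */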
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
u32 retries, val;
if (pp->iatu_unroll_enabled) {
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
upper_32_bits(cpu_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
lower_32_bits(cpu_addr + size - 1));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
upper_32_bits(pci_addr));
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
type);
dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
} else {
dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
PCIE_ATU_REGION_OUTBOUND | index);
dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
upper_32_bits(cpu_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
lower_32_bits(cpu_addr + size - 1));
dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
upper_32_bits(pci_addr));
dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
}
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
if (pp->iatu_unroll_enabled)
val = dw_pcie_readl_unroll(pp, index,
PCIE_ATU_UNR_REGION_CTRL2);
else
val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);
if (val == PCIE_ATU_ENABLE)
return;
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
dev_err(pp->dev, "iATU is not being enabled\n");
}
static struct irq_chip dw_msi_irq_chip = {
.name = "PCI-MSI",
.irq_enable = pci_msi_unmask_irq,
.irq_disable = pci_msi_mask_irq,
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
};
/*
 * MSI interrupt handler: scan the status register of each 32-vector MSI
 * controller group and dispatch every pending vector.
 */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
unsigned long val;
int i, pos, irq;
irqreturn_t ret = IRQ_NONE;
for (i = 0; i < MAX_MSI_CTRLS; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
(u32 *)&val);
if (val) {
ret = IRQ_HANDLED;
pos = 0;
while ((pos = find_next_bit(&val, 32, pos)) != 32) {
irq = irq_find_mapping(pp->irq_domain,
i * 32 + pos);
dw_pcie_wr_own_conf(pp,
PCIE_MSI_INTR0_STATUS + i * 12,
4, 1 << pos);
generic_handle_irq(irq);
pos++;
}
}
}
return ret;
}
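/*
 * Reserve a page and program its physical address into the MSI address
 * registers; MSI memory writes from endpoints targeting this address
 * are decoded by the controller's integrated MSI logic.
 */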
void dw_pcie_msi_init(struct pcie_port *pp)
{
u64 msi_target;
pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
msi_target = virt_to_phys((void *)pp->msi_data);
/* program the msi_data */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
(u32)(msi_target & 0xffffffff));
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
(u32)(msi_target >> 32 & 0xffffffff));
}
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
unsigned int res, bit, val;
res = (irq / 32) * 12;
bit = irq % 32;
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
val &= ~(1 << bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
unsigned int nvec, unsigned int pos)
{
unsigned int i;
for (i = 0; i < nvec; i++) {
irq_set_msi_desc_off(irq_base, i, NULL);
/* Disable corresponding interrupt on MSI controller */
if (pp->ops->msi_clear_irq)
pp->ops->msi_clear_irq(pp, pos + i);
else
dw_pcie_msi_clear_irq(pp, pos + i);
}
bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
unsigned int res, bit, val;
res = (irq / 32) * 12;
bit = irq % 32;
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
val |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
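/*
 * Allocate a naturally aligned block of 'no_irqs' vectors from the MSI
 * bitmap, attach the msi_desc to each mapped Linux IRQ and enable the
 * corresponding bits in the MSI interrupt controller.
 */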
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
int irq, pos0, i;
struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);
pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
order_base_2(no_irqs));
if (pos0 < 0)
goto no_valid_irq;
irq = irq_find_mapping(pp->irq_domain, pos0);
if (!irq)
goto no_valid_irq;
/*
* irq_create_mapping (called from dw_pcie_host_init) pre-allocates
* descs so there is no need to allocate descs here. We can therefore
* assume that if irq_find_mapping above returns non-zero, then the
* descs are also successfully allocated.
*/
for (i = 0; i < no_irqs; i++) {
if (irq_set_msi_desc_off(irq, i, desc) != 0) {
clear_irq_range(pp, irq, i, pos0);
goto no_valid_irq;
}
/* Enable corresponding interrupt in MSI interrupt controller */
if (pp->ops->msi_set_irq)
pp->ops->msi_set_irq(pp, pos0 + i);
else
dw_pcie_msi_set_irq(pp, pos0 + i);
}
*pos = pos0;
desc->nvec_used = no_irqs;
desc->msi_attrib.multiple = order_base_2(no_irqs);
return irq;
no_valid_irq:
*pos = pos0;
return -ENOSPC;
}
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
struct msi_msg msg;
u64 msi_target;
if (pp->ops->get_msi_addr)
msi_target = pp->ops->get_msi_addr(pp);
else
msi_target = virt_to_phys((void *)pp->msi_data);
msg.address_lo = (u32)(msi_target & 0xffffffff);
msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
if (pp->ops->get_msi_data)
msg.data = pp->ops->get_msi_data(pp, pos);
else
msg.data = pos;
pci_write_msi_msg(irq, &msg);
}
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
struct msi_desc *desc)
{
int irq, pos;
struct pcie_port *pp = pdev->bus->sysdata;
if (desc->msi_attrib.is_msix)
return -EINVAL;
irq = assign_irq(1, desc, &pos);
if (irq < 0)
return irq;
dw_msi_setup_msg(pp, irq, pos);
return 0;
}
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
int irq, pos;
struct msi_desc *desc;
struct pcie_port *pp = pdev->bus->sysdata;
/* MSI-X interrupts are not supported */
if (type == PCI_CAP_ID_MSIX)
return -EINVAL;
WARN_ON(!list_is_singular(&pdev->dev.msi_list));
desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
irq = assign_irq(nvec, desc, &pos);
if (irq < 0)
return irq;
dw_msi_setup_msg(pp, irq, pos);
return 0;
#else
return -EINVAL;
#endif
}
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
struct msi_desc *msi = irq_data_get_msi_desc(data);
struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
clear_irq_range(pp, irq, 1, data->hwirq);
}
static struct msi_controller dw_pcie_msi_chip = {
.setup_irq = dw_msi_setup_irq,
.setup_irqs = dw_msi_setup_irqs,
.teardown_irq = dw_msi_teardown_irq,
};
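/*
 * Poll for link up, sleeping between attempts; gives up with -ETIMEDOUT
 * after LINK_WAIT_MAX_RETRIES tries.
 */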
int dw_pcie_wait_for_link(struct pcie_port *pp)
{
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (dw_pcie_link_up(pp)) {
dev_info(pp->dev, "link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
dev_err(pp->dev, "phy link never came up\n");
return -ETIMEDOUT;
}
int dw_pcie_link_up(struct pcie_port *pp)
{
u32 val;
if (pp->ops->link_up)
return pp->ops->link_up(pp);
val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
}
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
static const struct irq_domain_ops msi_domain_ops = {
.map = dw_pcie_msi_map,
};
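/*
 * Detect iATU "unroll" support: on such controllers the legacy viewport
 * register is not implemented and reads back as all ones.
 */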
static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
{
u32 val;
val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT);
if (val == 0xffffffff)
return 1;
return 0;
}
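/*
 * Parse the host bridge windows from DT, map the DBI and configuration
 * spaces, set up the MSI IRQ domain and scan the root bus.
 */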
int dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
struct platform_device *pdev = to_platform_device(pp->dev);
struct pci_bus *bus, *child;
struct resource *cfg_res;
int i, ret;
LIST_HEAD(res);
struct resource_entry *win, *tmp;
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
pp->cfg0_size = resource_size(cfg_res)/2;
pp->cfg1_size = resource_size(cfg_res)/2;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;
} else if (!pp->va_cfg0_base) {
dev_err(pp->dev, "missing *config* reg space\n");
}
ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
if (ret)
return ret;
ret = devm_request_pci_bus_resources(&pdev->dev, &res);
if (ret)
goto error;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry_safe(win, tmp, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
ret = pci_remap_iospace(win->res, pp->io_base);
if (ret) {
dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
ret, win->res);
resource_list_destroy_entry(win);
} else {
pp->io = win->res;
pp->io->name = "I/O";
pp->io_size = resource_size(pp->io);
pp->io_bus_addr = pp->io->start - win->offset;
}
break;
case IORESOURCE_MEM:
pp->mem = win->res;
pp->mem->name = "MEM";
pp->mem_size = resource_size(pp->mem);
pp->mem_bus_addr = pp->mem->start - win->offset;
break;
case 0:
pp->cfg = win->res;
pp->cfg0_size = resource_size(pp->cfg)/2;
pp->cfg1_size = resource_size(pp->cfg)/2;
pp->cfg0_base = pp->cfg->start;
pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
break;
case IORESOURCE_BUS:
pp->busn = win->res;
break;
}
}
if (!pp->dbi_base) {
pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
resource_size(pp->cfg));
if (!pp->dbi_base) {
dev_err(pp->dev, "error with ioremap\n");
ret = -ENOMEM;
goto error;
}
}
pp->mem_base = pp->mem->start;
if (!pp->va_cfg0_base) {
pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(pp->dev, "error with ioremap in function\n");
ret = -ENOMEM;
goto error;
}
}
if (!pp->va_cfg1_base) {
pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(pp->dev, "error with ioremap\n");
ret = -ENOMEM;
goto error;
}
}
ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
if (ret)
pp->lanes = 0;
ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport);
if (ret)
pp->num_viewport = 2;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (!pp->ops->msi_host_init) {
pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
MAX_MSI_IRQS, &msi_domain_ops,
&dw_pcie_msi_chip);
if (!pp->irq_domain) {
dev_err(pp->dev, "irq domain init failed\n");
ret = -ENXIO;
goto error;
}
for (i = 0; i < MAX_MSI_IRQS; i++)
irq_create_mapping(pp->irq_domain, i);
} else {
ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
if (ret < 0)
goto error;
}
}
if (pp->ops->host_init)
pp->ops->host_init(pp);
pp->root_bus_nr = pp->busn->start;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
&dw_pcie_ops, pp, &res,
&dw_pcie_msi_chip);
dw_pcie_msi_chip.dev = pp->dev;
} else
bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
pp, &res);
if (!bus) {
ret = -ENOMEM;
goto error;
}
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
#ifdef CONFIG_ARM
/* support old dtbs that incorrectly describe IRQs */
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
error:
pci_free_resource_list(&res);
return ret;
}
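/*
 * Config accessors for devices below the root bus: CFG0 is used for the
 * bus directly below the root port, CFG1 for buses further downstream.
 * With only two viewports the CFG region shares an iATU index with I/O,
 * so it is reprogrammed back to I/O after the access.
 */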
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
int ret, type;
u32 busdev, cfg_size;
u64 cpu_addr;
void __iomem *va_cfg_base;
if (pp->ops->rd_other_conf)
return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
if (bus->parent->number == pp->root_bus_nr) {
type = PCIE_ATU_TYPE_CFG0;
cpu_addr = pp->cfg0_base;
cfg_size = pp->cfg0_size;
va_cfg_base = pp->va_cfg0_base;
} else {
type = PCIE_ATU_TYPE_CFG1;
cpu_addr = pp->cfg1_base;
cfg_size = pp->cfg1_size;
va_cfg_base = pp->va_cfg1_base;
}
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
if (pp->num_viewport <= 2)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
return ret;
}
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
int ret, type;
u32 busdev, cfg_size;
u64 cpu_addr;
void __iomem *va_cfg_base;
if (pp->ops->wr_other_conf)
return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
if (bus->parent->number == pp->root_bus_nr) {
type = PCIE_ATU_TYPE_CFG0;
cpu_addr = pp->cfg0_base;
cfg_size = pp->cfg0_size;
va_cfg_base = pp->va_cfg0_base;
} else {
type = PCIE_ATU_TYPE_CFG1;
cpu_addr = pp->cfg1_base;
cfg_size = pp->cfg1_size;
va_cfg_base = pp->va_cfg1_base;
}
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
if (pp->num_viewport <= 2)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
return ret;
}
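/*
 * Filter out config accesses that cannot reach a device: anything below
 * the root port while the link is down, and any slot other than 0 on
 * the root bus itself.
 */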
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
int dev)
{
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
if (!dw_pcie_link_up(pp))
return 0;
}
/* access only one slot on each root port */
if (bus->number == pp->root_bus_nr && dev > 0)
return 0;
return 1;
}
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = bus->sysdata;
if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (bus->number == pp->root_bus_nr)
return dw_pcie_rd_own_conf(pp, where, size, val);
return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = bus->sysdata;
if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == pp->root_bus_nr)
return dw_pcie_wr_own_conf(pp, where, size, val);
return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}
static struct pci_ops dw_pcie_ops = {
.read = dw_pcie_rd_conf,
.write = dw_pcie_wr_conf,
};
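/*
 * Basic root complex setup: program link width, RC BARs, bus numbers,
 * interrupt pin and command register and, unless the platform provides
 * its own address translation, the default outbound iATU regions for
 * memory and I/O.
 */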
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
/* set the number of lanes */
val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
switch (pp->lanes) {
case 1:
val |= PORT_LINK_MODE_1_LANES;
break;
case 2:
val |= PORT_LINK_MODE_2_LANES;
break;
case 4:
val |= PORT_LINK_MODE_4_LANES;
break;
case 8:
val |= PORT_LINK_MODE_8_LANES;
break;
default:
dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
return;
}
dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);
/* set link width speed control register */
val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (pp->lanes) {
case 1:
val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
break;
case 2:
val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
break;
case 4:
val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
break;
case 8:
val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
break;
}
dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
/* setup RC BARs */
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000);
/* setup interrupt pins */
val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val);
/* setup bus numbers */
val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00010100;
dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
/* setup command register */
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_rc(pp, PCI_COMMAND, val);
/*
* If the platform provides ->rd_other_conf, it means the platform
* uses its own address translation component rather than ATU, so
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
/* get iATU unroll support */
pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
dev_dbg(pp->dev, "iATU unroll: %s\n",
pp->iatu_unroll_enabled ? "enabled" : "disabled");
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
if (pp->num_viewport > 2)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
}
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
/* program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}