Merge branches 'pci/host-designware', 'pci/host-designware-common', 'pci/host-generic', 'pci/host-imx6', 'pci/host-iproc' and 'pci/host-xgene' into next
* pci/host-designware:
  PCI: designware: Use iATU0 for cfg and IO, iATU1 for MEM
  PCI: designware: Consolidate outbound iATU programming functions
  PCI: designware: Add support for x8 links

* pci/host-designware-common:
  PCI: designware: Wait for link to come up with consistent style
  PCI: layerscape: Factor out ls_pcie_establish_link()
  PCI: layerscape: Use dw_pcie_link_up() consistently
  PCI: dra7xx: Use dw_pcie_link_up() consistently
  PCI: imx6: Rename imx6_pcie_start_link() to imx6_pcie_establish_link()

* pci/host-generic:
  of/pci: Fix pci_address_to_pio() conversion of CPU address to I/O port

* pci/host-imx6:
  PCI: imx6: Add #define PCIE_RC_LCSR
  PCI: imx6: Use "u32", not "uint32_t"
  PCI: imx6: Add speed change timeout message

* pci/host-iproc:
  PCI: iproc: Free resource list after registration
  PCI: iproc: Directly add PCI resources
  PCI: iproc: Add BCMA PCIe driver
  PCI: iproc: Allow override of device tree IRQ mapping function

* pci/host-xgene:
  arm64: dts: Add APM X-Gene PCIe MSI nodes
  PCI: xgene: Add APM X-Gene v1 PCIe MSI/MSIX termination driver
commit bf933dbb84
@@ -0,0 +1,68 @@
* AppliedMicro X-Gene v1 PCIe MSI controller

Required properties:

- compatible: should be "apm,xgene1-msi" to identify
              X-Gene v1 PCIe MSI controller block.
- msi-controller: indicates that this is X-Gene v1 PCIe MSI controller node
- reg: physical base address (0x79000000) and length (0x900000) for controller
       registers. These registers include the MSI termination address and data
       registers as well as the MSI interrupt status registers.
- reg-names: not required
- interrupts: A list of 16 interrupt outputs of the controller, starting from
              interrupt number 0x10 to 0x1f.
- interrupt-names: not required

Each PCIe node needs to have property msi-parent that points to msi controller node

Examples:

SoC DTSI:

	+ MSI node:
	msi@79000000 {
		compatible = "apm,xgene1-msi";
		msi-controller;
		reg = <0x00 0x79000000 0x0 0x900000>;
		interrupts = <0x0 0x10 0x4>
			     <0x0 0x11 0x4>
			     <0x0 0x12 0x4>
			     <0x0 0x13 0x4>
			     <0x0 0x14 0x4>
			     <0x0 0x15 0x4>
			     <0x0 0x16 0x4>
			     <0x0 0x17 0x4>
			     <0x0 0x18 0x4>
			     <0x0 0x19 0x4>
			     <0x0 0x1a 0x4>
			     <0x0 0x1b 0x4>
			     <0x0 0x1c 0x4>
			     <0x0 0x1d 0x4>
			     <0x0 0x1e 0x4>
			     <0x0 0x1f 0x4>;
	};

	+ PCIe controller node with msi-parent property pointing to MSI node:
	pcie0: pcie@1f2b0000 {
		status = "disabled";
		device_type = "pci";
		compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
		#interrupt-cells = <1>;
		#size-cells = <2>;
		#address-cells = <3>;
		reg = < 0x00 0x1f2b0000 0x0 0x00010000   /* Controller registers */
			0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
		reg-names = "csr", "cfg";
		ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000   /* io */
			  0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
		dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
			      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
		interrupt-map-mask = <0x0 0x0 0x0 0x7>;
		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
				 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
				 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
				 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
		dma-coherent;
		clocks = <&pcie0clk 0>;
		msi-parent = <&msi>;
	};
@@ -7564,6 +7564,14 @@ L:	linux-pci@vger.kernel.org
S:	Orphan
F:	drivers/pci/host/*spear*

PCI MSI DRIVER FOR APPLIEDMICRO XGENE
M:	Duc Dang <dhdang@apm.com>
L:	linux-pci@vger.kernel.org
L:	linux-arm-kernel@lists.infradead.org
S:	Maintained
F:	Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
F:	drivers/pci/host/pci-xgene-msi.c

PCMCIA SUBSYSTEM
P:	Linux PCMCIA Team
L:	linux-pcmcia@lists.infradead.org
@ -374,6 +374,28 @@
|
|||
};
|
||||
};
|
||||
|
||||
msi: msi@79000000 {
|
||||
compatible = "apm,xgene1-msi";
|
||||
msi-controller;
|
||||
reg = <0x00 0x79000000 0x0 0x900000>;
|
||||
interrupts = < 0x0 0x10 0x4
|
||||
0x0 0x11 0x4
|
||||
0x0 0x12 0x4
|
||||
0x0 0x13 0x4
|
||||
0x0 0x14 0x4
|
||||
0x0 0x15 0x4
|
||||
0x0 0x16 0x4
|
||||
0x0 0x17 0x4
|
||||
0x0 0x18 0x4
|
||||
0x0 0x19 0x4
|
||||
0x0 0x1a 0x4
|
||||
0x0 0x1b 0x4
|
||||
0x0 0x1c 0x4
|
||||
0x0 0x1d 0x4
|
||||
0x0 0x1e 0x4
|
||||
0x0 0x1f 0x4>;
|
||||
};
|
||||
|
||||
pcie0: pcie@1f2b0000 {
|
||||
status = "disabled";
|
||||
device_type = "pci";
|
||||
|
@ -395,6 +417,7 @@
|
|||
0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
|
||||
dma-coherent;
|
||||
clocks = <&pcie0clk 0>;
|
||||
msi-parent = <&msi>;
|
||||
};
|
||||
|
||||
pcie1: pcie@1f2c0000 {
|
||||
|
@ -418,6 +441,7 @@
|
|||
0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>;
|
||||
dma-coherent;
|
||||
clocks = <&pcie1clk 0>;
|
||||
msi-parent = <&msi>;
|
||||
};
|
||||
|
||||
pcie2: pcie@1f2d0000 {
|
||||
|
@ -441,6 +465,7 @@
|
|||
0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>;
|
||||
dma-coherent;
|
||||
clocks = <&pcie2clk 0>;
|
||||
msi-parent = <&msi>;
|
||||
};
|
||||
|
||||
pcie3: pcie@1f500000 {
|
||||
|
@ -464,6 +489,7 @@
|
|||
0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>;
|
||||
dma-coherent;
|
||||
clocks = <&pcie3clk 0>;
|
||||
msi-parent = <&msi>;
|
||||
};
|
||||
|
||||
pcie4: pcie@1f510000 {
|
||||
|
@ -487,6 +513,7 @@
|
|||
0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>;
|
||||
dma-coherent;
|
||||
clocks = <&pcie4clk 0>;
|
||||
msi-parent = <&msi>;
|
||||
};
|
||||
|
||||
serial0: serial@1c020000 {
|
||||
|
|
|
@@ -765,7 +765,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
	spin_lock(&io_range_lock);
	list_for_each_entry(res, &io_range_list, list) {
		if (address >= res->start && address < res->start + res->size) {
			addr = res->start - address + offset;
			addr = address - res->start + offset;
			break;
		}
		offset += res->size;
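The change above swaps the operands of the subtraction: the I/O-port number for a CPU address is its distance into the matching range plus the cumulative size of the ranges registered before it. A small stand-alone sketch of the corrected arithmetic, with invented window addresses (not the kernel's own data structures):

#include <stdio.h>

struct io_range { unsigned long start, size; };

/* Toy model of the corrected walk: port = cumulative size of earlier ranges
 * plus the distance of 'address' into the range that contains it. */
static unsigned long address_to_pio(const struct io_range *r, int n,
				    unsigned long address)
{
	unsigned long offset = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (address >= r[i].start && address < r[i].start + r[i].size)
			return address - r[i].start + offset; /* fixed operand order */
		offset += r[i].size;
	}
	return (unsigned long)-1;	/* not inside any registered window */
}

int main(void)
{
	/* Two hypothetical 64 KiB CPU windows. */
	struct io_range r[] = {
		{ 0x3eff0000UL, 0x10000UL },
		{ 0x3f000000UL, 0x10000UL },
	};

	/* 0x3f000040 lies 0x40 into the second window: expect port 0x10040. */
	printf("pio = 0x%lx\n", address_to_pio(r, 2, 0x3f000040UL));
	return 0;
}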
@@ -89,11 +89,20 @@ config PCI_XGENE
	depends on ARCH_XGENE
	depends on OF
	select PCIEPORTBUS
	select PCI_MSI_IRQ_DOMAIN if PCI_MSI
	help
	  Say Y here if you want internal PCI support on APM X-Gene SoC.
	  There are 5 internal PCIe ports available. Each port is GEN3 capable
	  and have varied lanes from x1 to x8.

config PCI_XGENE_MSI
	bool "X-Gene v1 PCIe MSI feature"
	depends on PCI_XGENE && PCI_MSI
	default y
	help
	  Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
	  This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.

config PCI_LAYERSCAPE
	bool "Freescale Layerscape PCIe controller"
	depends on OF && ARM

@@ -125,4 +134,15 @@ config PCIE_IPROC_PLATFORM
	  Say Y here if you want to use the Broadcom iProc PCIe controller
	  through the generic platform bus interface

config PCIE_IPROC_BCMA
	bool "Broadcom iProc PCIe BCMA bus driver"
	depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST)
	select PCIE_IPROC
	select BCMA
	select PCI_DOMAINS
	default ARCH_BCM_5301X
	help
	  Say Y here if you want to use the Broadcom iProc PCIe controller
	  through the BCMA bus interface

endmenu
@@ -11,7 +11,9 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
@@ -93,9 +93,9 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp)

static int dra7xx_pcie_establish_link(struct pcie_port *pp)
{
	u32 reg;
	unsigned int retries = 1000;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
	u32 reg;
	unsigned int retries;

	if (dw_pcie_link_up(pp)) {
		dev_err(pp->dev, "link is already up\n");

@@ -106,19 +106,14 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	while (retries--) {
		reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
		if (reg & LINK_UP)
			break;
	for (retries = 0; retries < 1000; retries++) {
		if (dw_pcie_link_up(pp))
			return 0;
		usleep_range(10, 20);
	}

	if (retries == 0) {
		dev_err(pp->dev, "link is not up\n");
		return -ETIMEDOUT;
	}

	return 0;
	dev_err(pp->dev, "link is not up\n");
	return -EINVAL;
}

static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
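The dra7xx change above, and the exynos, imx6, keystone, layerscape and spear13xx changes further down, converge on the same bounded polling loop. A minimal sketch of that shared idiom, assuming the dw_pcie_link_up()/usleep_range()/dev_err() helpers used by those drivers (the retry count and delay are illustrative, not taken verbatim from any one driver):

/* Sketch of the common "wait for link" pattern used across this series. */
static int example_pcie_wait_for_link(struct pcie_port *pp)
{
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		if (dw_pcie_link_up(pp))
			return 0;		/* link has trained */
		usleep_range(100, 1000);	/* back off before re-polling */
	}

	dev_err(pp->dev, "phy link never came up\n");
	return -EINVAL;
}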
@ -316,9 +316,9 @@ static void exynos_pcie_assert_reset(struct pcie_port *pp)
|
|||
|
||||
static int exynos_pcie_establish_link(struct pcie_port *pp)
|
||||
{
|
||||
u32 val;
|
||||
int count = 0;
|
||||
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
|
||||
u32 val;
|
||||
unsigned int retries;
|
||||
|
||||
if (dw_pcie_link_up(pp)) {
|
||||
dev_err(pp->dev, "Link already up\n");
|
||||
|
@ -357,27 +357,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
|
|||
PCIE_APP_LTSSM_ENABLE);
|
||||
|
||||
/* check if the link is up or not */
|
||||
while (!dw_pcie_link_up(pp)) {
|
||||
mdelay(100);
|
||||
count++;
|
||||
if (count == 10) {
|
||||
while (exynos_phy_readl(exynos_pcie,
|
||||
PCIE_PHY_PLL_LOCKED) == 0) {
|
||||
val = exynos_blk_readl(exynos_pcie,
|
||||
PCIE_PHY_PLL_LOCKED);
|
||||
dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
|
||||
}
|
||||
/* power off phy */
|
||||
exynos_pcie_power_off_phy(pp);
|
||||
|
||||
dev_err(pp->dev, "PCIe Link Fail\n");
|
||||
return -EINVAL;
|
||||
for (retries = 0; retries < 10; retries++) {
|
||||
if (dw_pcie_link_up(pp)) {
|
||||
dev_info(pp->dev, "Link up\n");
|
||||
return 0;
|
||||
}
|
||||
mdelay(100);
|
||||
}
|
||||
|
||||
dev_info(pp->dev, "Link up\n");
|
||||
while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
|
||||
val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
|
||||
dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
|
||||
}
|
||||
/* power off phy */
|
||||
exynos_pcie_power_off_phy(pp);
|
||||
|
||||
return 0;
|
||||
dev_err(pp->dev, "PCIe Link Fail\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
|
||||
|
|
|
@ -47,6 +47,8 @@ struct imx6_pcie {
|
|||
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
|
||||
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
|
||||
|
||||
#define PCIE_RC_LCSR 0x80
|
||||
|
||||
/* PCIe Port Logic registers (memory-mapped) */
|
||||
#define PL_OFFSET 0x700
|
||||
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
|
||||
|
@ -335,21 +337,36 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
|
|||
|
||||
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
|
||||
{
|
||||
int count = 200;
|
||||
unsigned int retries;
|
||||
|
||||
while (!dw_pcie_link_up(pp)) {
|
||||
for (retries = 0; retries < 200; retries++) {
|
||||
if (dw_pcie_link_up(pp))
|
||||
return 0;
|
||||
usleep_range(100, 1000);
|
||||
if (--count)
|
||||
continue;
|
||||
|
||||
dev_err(pp->dev, "phy link never came up\n");
|
||||
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
|
||||
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
|
||||
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
dev_err(pp->dev, "phy link never came up\n");
|
||||
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
|
||||
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
|
||||
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
|
||||
{
|
||||
u32 tmp;
|
||||
unsigned int retries;
|
||||
|
||||
for (retries = 0; retries < 200; retries++) {
|
||||
tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
|
||||
/* Test if the speed change finished. */
|
||||
if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
|
||||
return 0;
|
||||
usleep_range(100, 1000);
|
||||
}
|
||||
|
||||
dev_err(pp->dev, "Speed change timeout\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
|
||||
|
@ -359,11 +376,11 @@ static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
|
|||
return dw_handle_msi_irq(pp);
|
||||
}
|
||||
|
||||
static int imx6_pcie_start_link(struct pcie_port *pp)
|
||||
static int imx6_pcie_establish_link(struct pcie_port *pp)
|
||||
{
|
||||
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
|
||||
uint32_t tmp;
|
||||
int ret, count;
|
||||
u32 tmp;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Force Gen1 operation when starting the link. In case the link is
|
||||
|
@ -397,29 +414,22 @@ static int imx6_pcie_start_link(struct pcie_port *pp)
|
|||
tmp |= PORT_LOGIC_SPEED_CHANGE;
|
||||
writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
|
||||
|
||||
count = 200;
|
||||
while (count--) {
|
||||
tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
|
||||
/* Test if the speed change finished. */
|
||||
if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
|
||||
break;
|
||||
usleep_range(100, 1000);
|
||||
ret = imx6_pcie_wait_for_speed_change(pp);
|
||||
if (ret) {
|
||||
dev_err(pp->dev, "Failed to bring link up!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Make sure link training is finished as well! */
|
||||
if (count)
|
||||
ret = imx6_pcie_wait_for_link(pp);
|
||||
else
|
||||
ret = -EINVAL;
|
||||
|
||||
ret = imx6_pcie_wait_for_link(pp);
|
||||
if (ret) {
|
||||
dev_err(pp->dev, "Failed to bring link up!\n");
|
||||
} else {
|
||||
tmp = readl(pp->dbi_base + 0x80);
|
||||
dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
|
||||
dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void imx6_pcie_host_init(struct pcie_port *pp)
|
||||
|
@ -432,7 +442,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
|
|||
|
||||
dw_pcie_setup_rc(pp);
|
||||
|
||||
imx6_pcie_start_link(pp);
|
||||
imx6_pcie_establish_link(pp);
|
||||
|
||||
if (IS_ENABLED(CONFIG_PCI_MSI))
|
||||
dw_pcie_msi_init(pp);
|
||||
|
@ -440,19 +450,19 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
|
|||
|
||||
static void imx6_pcie_reset_phy(struct pcie_port *pp)
|
||||
{
|
||||
uint32_t temp;
|
||||
u32 tmp;
|
||||
|
||||
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
|
||||
temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
|
||||
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
|
||||
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
|
||||
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
|
||||
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
|
||||
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
|
||||
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
|
||||
|
||||
usleep_range(2000, 3000);
|
||||
|
||||
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
|
||||
temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
|
||||
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
|
||||
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
|
||||
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
|
||||
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
|
||||
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
|
||||
}
|
||||
|
||||
static int imx6_pcie_link_up(struct pcie_port *pp)
|
||||
|
|
|
@@ -88,7 +88,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
	struct pcie_port *pp = &ks_pcie->pp;
	int count = 200;
	unsigned int retries;

	dw_pcie_setup_rc(pp);

@@ -99,17 +99,15 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)

	ks_dw_pcie_initiate_link_train(ks_pcie);
	/* check if the link is up or not */
	while (!dw_pcie_link_up(pp)) {
	for (retries = 0; retries < 200; retries++) {
		if (dw_pcie_link_up(pp))
			return 0;
		usleep_range(100, 1000);
		if (--count) {
			ks_dw_pcie_initiate_link_train(ks_pcie);
			continue;
		}
		dev_err(pp->dev, "phy link never came up\n");
		return -EINVAL;
		ks_dw_pcie_initiate_link_train(ks_pcie);
	}

	return 0;
	dev_err(pp->dev, "phy link never came up\n");
	return -EINVAL;
}

static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -62,22 +62,27 @@ static int ls_pcie_link_up(struct pcie_port *pp)
	return 1;
}

static int ls_pcie_establish_link(struct pcie_port *pp)
{
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		if (dw_pcie_link_up(pp))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(pp->dev, "phy link never came up\n");
	return -EINVAL;
}

static void ls_pcie_host_init(struct pcie_port *pp)
{
	struct ls_pcie *pcie = to_ls_pcie(pp);
	int count = 0;
	u32 val;

	dw_pcie_setup_rc(pp);

	while (!ls_pcie_link_up(pp)) {
		usleep_range(100, 1000);
		count++;
		if (count >= 200) {
			dev_err(pp->dev, "phy link never came up\n");
			return;
		}
	}
	ls_pcie_establish_link(pp);

	/*
	 * LS1021A Workaround for internal TKT228622
@ -0,0 +1,596 @@
|
|||
/*
|
||||
* APM X-Gene MSI Driver
|
||||
*
|
||||
* Copyright (c) 2014, Applied Micro Circuits Corporation
|
||||
* Author: Tanmay Inamdar <tinamdar@apm.com>
|
||||
* Duc Dang <dhdang@apm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/of_pci.h>
|
||||
|
||||
#define MSI_IR0 0x000000
|
||||
#define MSI_INT0 0x800000
|
||||
#define IDX_PER_GROUP 8
|
||||
#define IRQS_PER_IDX 16
|
||||
#define NR_HW_IRQS 16
|
||||
#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
|
||||
|
||||
struct xgene_msi_group {
|
||||
struct xgene_msi *msi;
|
||||
int gic_irq;
|
||||
u32 msi_grp;
|
||||
};
|
||||
|
||||
struct xgene_msi {
|
||||
struct device_node *node;
|
||||
struct msi_controller mchip;
|
||||
struct irq_domain *domain;
|
||||
u64 msi_addr;
|
||||
void __iomem *msi_regs;
|
||||
unsigned long *bitmap;
|
||||
struct mutex bitmap_lock;
|
||||
struct xgene_msi_group *msi_groups;
|
||||
int num_cpus;
|
||||
};
|
||||
|
||||
/* Global data */
|
||||
static struct xgene_msi xgene_msi_ctrl;
|
||||
|
||||
static struct irq_chip xgene_msi_top_irq_chip = {
|
||||
.name = "X-Gene1 MSI",
|
||||
.irq_enable = pci_msi_unmask_irq,
|
||||
.irq_disable = pci_msi_mask_irq,
|
||||
.irq_mask = pci_msi_mask_irq,
|
||||
.irq_unmask = pci_msi_unmask_irq,
|
||||
};
|
||||
|
||||
static struct msi_domain_info xgene_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX),
|
||||
.chip = &xgene_msi_top_irq_chip,
|
||||
};
|
||||
|
||||
/*
|
||||
* X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
|
||||
* n is group number (0..F), x is index of registers in each group (0..7)
|
||||
* The register layout is as follows:
|
||||
* MSI0IR0 base_addr
|
||||
* MSI0IR1 base_addr + 0x10000
|
||||
* ... ...
|
||||
* MSI0IR6 base_addr + 0x60000
|
||||
* MSI0IR7 base_addr + 0x70000
|
||||
* MSI1IR0 base_addr + 0x80000
|
||||
* MSI1IR1 base_addr + 0x90000
|
||||
* ... ...
|
||||
* MSI1IR7 base_addr + 0xF0000
|
||||
* MSI2IR0 base_addr + 0x100000
|
||||
* ... ...
|
||||
* MSIFIR0 base_addr + 0x780000
|
||||
* MSIFIR1 base_addr + 0x790000
|
||||
* ... ...
|
||||
* MSIFIR7 base_addr + 0x7F0000
|
||||
* MSIINT0 base_addr + 0x800000
|
||||
* MSIINT1 base_addr + 0x810000
|
||||
* ... ...
|
||||
* MSIINTF base_addr + 0x8F0000
|
||||
*
|
||||
* Each index register supports 16 MSI vectors (0..15) to generate interrupt.
|
||||
* There are total 16 GIC IRQs assigned for these 16 groups of MSI termination
|
||||
* registers.
|
||||
*
|
||||
* Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
|
||||
* the MSI pending status caused by 1 of its 8 index registers.
|
||||
*/
|
||||
|
||||
/* MSInIRx read helper */
|
||||
static u32 xgene_msi_ir_read(struct xgene_msi *msi,
|
||||
u32 msi_grp, u32 msir_idx)
|
||||
{
|
||||
return readl_relaxed(msi->msi_regs + MSI_IR0 +
|
||||
(msi_grp << 19) + (msir_idx << 16));
|
||||
}
|
||||
|
||||
/* MSIINTn read helper */
|
||||
static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
|
||||
{
|
||||
return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
|
||||
}
|
||||
|
||||
/*
 * With 2048 MSI vectors supported, the MSI message can be constructed using
 * following scheme:
 * - Divide into 8 256-vector groups
 *		Group 0: 0-255
 *		Group 1: 256-511
 *		Group 2: 512-767
 *		...
 *		Group 7: 1792-2047
 * - Each 256-vector group is divided into 16 16-vector groups
 *	As an example: 16 16-vector groups for 256-vector group 0-255 is
 *		Group 0: 0-15
 *		Group 1: 16-32
 *		...
 *		Group 15: 240-255
 * - The termination address of MSI vector in 256-vector group n and 16-vector
 *   group x is the address of MSIxIRn
 * - The data for MSI vector in 16-vector group x is x
 */
static u32 hwirq_to_reg_set(unsigned long hwirq)
{
	return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
}

static u32 hwirq_to_group(unsigned long hwirq)
{
	return (hwirq % NR_HW_IRQS);
}

static u32 hwirq_to_msi_data(unsigned long hwirq)
{
	return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
}

static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
	u32 reg_set = hwirq_to_reg_set(data->hwirq);
	u32 group = hwirq_to_group(data->hwirq);
	u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);

	msg->address_hi = upper_32_bits(target_addr);
	msg->address_lo = lower_32_bits(target_addr);
	msg->data = hwirq_to_msi_data(data->hwirq);
}
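/*
 * Worked example (values chosen purely for illustration, not part of the
 * original patch): with NR_HW_IRQS = 16 and IRQS_PER_IDX = 16, hwirq 291
 * (0x123) gives
 *	reg_set = 291 / 256       = 1
 *	group   = 291 % 16        = 3
 *	data    = (291 / 16) % 16 = 2
 * so the message targets msi_addr + ((8 * 3 + 1) << 16) = msi_addr + 0x190000
 * with data 2.  The interrupt handler below reverses the mapping:
 * ((msir_index * IRQS_PER_IDX) + intr_index) * NR_HW_IRQS + msi_grp =
 * ((1 * 16) + 2) * 16 + 3 = 291.
 */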
|
||||
|
||||
/*
|
||||
* X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
|
||||
* the expected behaviour of .set_affinity for each MSI interrupt, the 16
|
||||
* MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
|
||||
* for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another
|
||||
* MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a
|
||||
* consequence, the total MSI vectors that X-Gene v1 supports will be
|
||||
* reduced to 256 (2048/8) vectors.
|
||||
*/
|
||||
static int hwirq_to_cpu(unsigned long hwirq)
|
||||
{
|
||||
return (hwirq % xgene_msi_ctrl.num_cpus);
|
||||
}
|
||||
|
||||
static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
|
||||
{
|
||||
return (hwirq - hwirq_to_cpu(hwirq));
|
||||
}
|
||||
|
||||
static int xgene_msi_set_affinity(struct irq_data *irqdata,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
int target_cpu = cpumask_first(mask);
|
||||
int curr_cpu;
|
||||
|
||||
curr_cpu = hwirq_to_cpu(irqdata->hwirq);
|
||||
if (curr_cpu == target_cpu)
|
||||
return IRQ_SET_MASK_OK_DONE;
|
||||
|
||||
/* Update MSI number to target the new CPU */
|
||||
irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
|
||||
|
||||
return IRQ_SET_MASK_OK;
|
||||
}
|
||||
|
||||
static struct irq_chip xgene_msi_bottom_irq_chip = {
|
||||
.name = "MSI",
|
||||
.irq_set_affinity = xgene_msi_set_affinity,
|
||||
.irq_compose_msi_msg = xgene_compose_msi_msg,
|
||||
};
|
||||
|
||||
static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
unsigned int nr_irqs, void *args)
|
||||
{
|
||||
struct xgene_msi *msi = domain->host_data;
|
||||
int msi_irq;
|
||||
|
||||
mutex_lock(&msi->bitmap_lock);
|
||||
|
||||
msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
|
||||
msi->num_cpus, 0);
|
||||
if (msi_irq < NR_MSI_VEC)
|
||||
bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
|
||||
else
|
||||
msi_irq = -ENOSPC;
|
||||
|
||||
mutex_unlock(&msi->bitmap_lock);
|
||||
|
||||
if (msi_irq < 0)
|
||||
return msi_irq;
|
||||
|
||||
irq_domain_set_info(domain, virq, msi_irq,
|
||||
&xgene_msi_bottom_irq_chip, domain->host_data,
|
||||
handle_simple_irq, NULL, NULL);
|
||||
set_irq_flags(virq, IRQF_VALID);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xgene_irq_domain_free(struct irq_domain *domain,
|
||||
unsigned int virq, unsigned int nr_irqs)
|
||||
{
|
||||
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
|
||||
struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
|
||||
u32 hwirq;
|
||||
|
||||
mutex_lock(&msi->bitmap_lock);
|
||||
|
||||
hwirq = hwirq_to_canonical_hwirq(d->hwirq);
|
||||
bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
|
||||
|
||||
mutex_unlock(&msi->bitmap_lock);
|
||||
|
||||
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops msi_domain_ops = {
|
||||
.alloc = xgene_irq_domain_alloc,
|
||||
.free = xgene_irq_domain_free,
|
||||
};
|
||||
|
||||
static int xgene_allocate_domains(struct xgene_msi *msi)
|
||||
{
|
||||
msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
|
||||
&msi_domain_ops, msi);
|
||||
if (!msi->domain)
|
||||
return -ENOMEM;
|
||||
|
||||
msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node,
|
||||
&xgene_msi_domain_info,
|
||||
msi->domain);
|
||||
|
||||
if (!msi->mchip.domain) {
|
||||
irq_domain_remove(msi->domain);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xgene_free_domains(struct xgene_msi *msi)
|
||||
{
|
||||
if (msi->mchip.domain)
|
||||
irq_domain_remove(msi->mchip.domain);
|
||||
if (msi->domain)
|
||||
irq_domain_remove(msi->domain);
|
||||
}
|
||||
|
||||
static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
|
||||
{
|
||||
int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);
|
||||
|
||||
xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
|
||||
if (!xgene_msi->bitmap)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_init(&xgene_msi->bitmap_lock);
|
||||
|
||||
xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
|
||||
sizeof(struct xgene_msi_group),
|
||||
GFP_KERNEL);
|
||||
if (!xgene_msi->msi_groups)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct xgene_msi_group *msi_groups;
|
||||
struct xgene_msi *xgene_msi;
|
||||
unsigned int virq;
|
||||
int msir_index, msir_val, hw_irq;
|
||||
u32 intr_index, grp_select, msi_grp;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
msi_groups = irq_desc_get_handler_data(desc);
|
||||
xgene_msi = msi_groups->msi;
|
||||
msi_grp = msi_groups->msi_grp;
|
||||
|
||||
/*
|
||||
* MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt
|
||||
* If bit x of this register is set (x is 0..7), one or more interupts
|
||||
* corresponding to MSInIRx is set.
|
||||
*/
|
||||
grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
|
||||
while (grp_select) {
|
||||
msir_index = ffs(grp_select) - 1;
|
||||
/*
|
||||
* Calculate MSInIRx address to read to check for interrupts
|
||||
* (refer to termination address and data assignment
|
||||
* described in xgene_compose_msi_msg() )
|
||||
*/
|
||||
msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
|
||||
while (msir_val) {
|
||||
intr_index = ffs(msir_val) - 1;
|
||||
/*
|
||||
* Calculate MSI vector number (refer to the termination
|
||||
* address and data assignment described in
|
||||
* xgene_compose_msi_msg function)
|
||||
*/
|
||||
hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
|
||||
NR_HW_IRQS) + msi_grp;
|
||||
/*
|
||||
* As we have multiple hw_irq that maps to single MSI,
|
||||
* always look up the virq using the hw_irq as seen from
|
||||
* CPU0
|
||||
*/
|
||||
hw_irq = hwirq_to_canonical_hwirq(hw_irq);
|
||||
virq = irq_find_mapping(xgene_msi->domain, hw_irq);
|
||||
WARN_ON(!virq);
|
||||
if (virq != 0)
|
||||
generic_handle_irq(virq);
|
||||
msir_val &= ~(1 << intr_index);
|
||||
}
|
||||
grp_select &= ~(1 << msir_index);
|
||||
|
||||
if (!grp_select) {
|
||||
/*
|
||||
* We handled all interrupts happened in this group,
|
||||
* resample this group MSI_INTx register in case
|
||||
* something else has been made pending in the meantime
|
||||
*/
|
||||
grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
|
||||
}
|
||||
}
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static int xgene_msi_remove(struct platform_device *pdev)
|
||||
{
|
||||
int virq, i;
|
||||
struct xgene_msi *msi = platform_get_drvdata(pdev);
|
||||
|
||||
for (i = 0; i < NR_HW_IRQS; i++) {
|
||||
virq = msi->msi_groups[i].gic_irq;
|
||||
if (virq != 0) {
|
||||
irq_set_chained_handler(virq, NULL);
|
||||
irq_set_handler_data(virq, NULL);
|
||||
}
|
||||
}
|
||||
kfree(msi->msi_groups);
|
||||
|
||||
kfree(msi->bitmap);
|
||||
msi->bitmap = NULL;
|
||||
|
||||
xgene_free_domains(msi);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int xgene_msi_hwirq_alloc(unsigned int cpu)
|
||||
{
|
||||
struct xgene_msi *msi = &xgene_msi_ctrl;
|
||||
struct xgene_msi_group *msi_group;
|
||||
cpumask_var_t mask;
|
||||
int i;
|
||||
int err;
|
||||
|
||||
for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
|
||||
msi_group = &msi->msi_groups[i];
|
||||
if (!msi_group->gic_irq)
|
||||
continue;
|
||||
|
||||
irq_set_chained_handler(msi_group->gic_irq,
|
||||
xgene_msi_isr);
|
||||
err = irq_set_handler_data(msi_group->gic_irq, msi_group);
|
||||
if (err) {
|
||||
pr_err("failed to register GIC IRQ handler\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
/*
|
||||
* Statically allocate MSI GIC IRQs to each CPU core.
|
||||
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
|
||||
* to each core.
|
||||
*/
|
||||
if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
|
||||
cpumask_clear(mask);
|
||||
cpumask_set_cpu(cpu, mask);
|
||||
err = irq_set_affinity(msi_group->gic_irq, mask);
|
||||
if (err)
|
||||
pr_err("failed to set affinity for GIC IRQ");
|
||||
free_cpumask_var(mask);
|
||||
} else {
|
||||
pr_err("failed to alloc CPU mask for affinity\n");
|
||||
err = -EINVAL;
|
||||
}
|
||||
|
||||
if (err) {
|
||||
irq_set_chained_handler(msi_group->gic_irq, NULL);
|
||||
irq_set_handler_data(msi_group->gic_irq, NULL);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xgene_msi_hwirq_free(unsigned int cpu)
|
||||
{
|
||||
struct xgene_msi *msi = &xgene_msi_ctrl;
|
||||
struct xgene_msi_group *msi_group;
|
||||
int i;
|
||||
|
||||
for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
|
||||
msi_group = &msi->msi_groups[i];
|
||||
if (!msi_group->gic_irq)
|
||||
continue;
|
||||
|
||||
irq_set_chained_handler(msi_group->gic_irq, NULL);
|
||||
irq_set_handler_data(msi_group->gic_irq, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static int xgene_msi_cpu_callback(struct notifier_block *nfb,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
unsigned cpu = (unsigned long)hcpu;
|
||||
|
||||
switch (action) {
|
||||
case CPU_ONLINE:
|
||||
case CPU_ONLINE_FROZEN:
|
||||
xgene_msi_hwirq_alloc(cpu);
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
case CPU_DEAD_FROZEN:
|
||||
xgene_msi_hwirq_free(cpu);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block xgene_msi_cpu_notifier = {
|
||||
.notifier_call = xgene_msi_cpu_callback,
|
||||
};
|
||||
|
||||
static const struct of_device_id xgene_msi_match_table[] = {
|
||||
{.compatible = "apm,xgene1-msi"},
|
||||
{},
|
||||
};
|
||||
|
||||
static int xgene_msi_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct resource *res;
|
||||
int rc, irq_index;
|
||||
struct xgene_msi *xgene_msi;
|
||||
unsigned int cpu;
|
||||
int virt_msir;
|
||||
u32 msi_val, msi_idx;
|
||||
|
||||
xgene_msi = &xgene_msi_ctrl;
|
||||
|
||||
platform_set_drvdata(pdev, xgene_msi);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(xgene_msi->msi_regs)) {
|
||||
dev_err(&pdev->dev, "no reg space\n");
|
||||
rc = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
xgene_msi->msi_addr = res->start;
|
||||
|
||||
xgene_msi->num_cpus = num_possible_cpus();
|
||||
|
||||
rc = xgene_msi_init_allocator(xgene_msi);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
|
||||
goto error;
|
||||
}
|
||||
|
||||
rc = xgene_allocate_domains(xgene_msi);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
|
||||
goto error;
|
||||
}
|
||||
|
||||
for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
|
||||
virt_msir = platform_get_irq(pdev, irq_index);
|
||||
if (virt_msir < 0) {
|
||||
dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
|
||||
irq_index);
|
||||
rc = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
|
||||
xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
|
||||
xgene_msi->msi_groups[irq_index].msi = xgene_msi;
|
||||
}
|
||||
|
||||
/*
|
||||
* MSInIRx registers are read-to-clear; before registering
|
||||
* interrupt handlers, read all of them to clear spurious
|
||||
* interrupts that may occur before the driver is probed.
|
||||
*/
|
||||
for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
|
||||
for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
|
||||
msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
|
||||
msi_idx);
|
||||
/* Read MSIINTn to confirm */
|
||||
msi_val = xgene_msi_int_read(xgene_msi, irq_index);
|
||||
if (msi_val) {
|
||||
dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
|
||||
rc = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
if (xgene_msi_hwirq_alloc(cpu)) {
|
||||
dev_err(&pdev->dev, "failed to register MSI handlers\n");
|
||||
cpu_notifier_register_done();
|
||||
goto error;
|
||||
}
|
||||
|
||||
rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
|
||||
cpu_notifier_register_done();
|
||||
goto error;
|
||||
}
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
xgene_msi->mchip.of_node = pdev->dev.of_node;
|
||||
rc = of_pci_msi_chip_add(&xgene_msi->mchip);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "failed to add MSI controller chip\n");
|
||||
goto error_notifier;
|
||||
}
|
||||
|
||||
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
|
||||
|
||||
return 0;
|
||||
|
||||
error_notifier:
|
||||
unregister_hotcpu_notifier(&xgene_msi_cpu_notifier);
|
||||
error:
|
||||
xgene_msi_remove(pdev);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static struct platform_driver xgene_msi_driver = {
|
||||
.driver = {
|
||||
.name = "xgene-msi",
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = xgene_msi_match_table,
|
||||
},
|
||||
.probe = xgene_msi_probe,
|
||||
.remove = xgene_msi_remove,
|
||||
};
|
||||
|
||||
static int __init xgene_pcie_msi_init(void)
|
||||
{
|
||||
return platform_driver_register(&xgene_msi_driver);
|
||||
}
|
||||
subsys_initcall(xgene_pcie_msi_init);
|
|
@@ -468,6 +468,23 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
	return 0;
}

static int xgene_pcie_msi_enable(struct pci_bus *bus)
{
	struct device_node *msi_node;

	msi_node = of_parse_phandle(bus->dev.of_node,
					"msi-parent", 0);
	if (!msi_node)
		return -ENODEV;

	bus->msi = of_pci_find_msi_chip_by_node(msi_node);
	if (!bus->msi)
		return -ENODEV;

	bus->msi->dev = &bus->dev;
	return 0;
}

static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;

@@ -504,6 +521,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
	if (!bus)
		return -ENOMEM;

	if (IS_ENABLED(CONFIG_PCI_MSI))
		if (xgene_pcie_msi_enable(bus))
			dev_info(port->dev, "failed to enable MSI\n");

	pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);
@ -31,6 +31,7 @@
|
|||
#define PORT_LINK_MODE_1_LANES (0x1 << 16)
|
||||
#define PORT_LINK_MODE_2_LANES (0x3 << 16)
|
||||
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
|
||||
#define PORT_LINK_MODE_8_LANES (0xf << 16)
|
||||
|
||||
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
|
||||
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
|
||||
|
@ -38,6 +39,7 @@
|
|||
#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
|
||||
#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
|
||||
#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
|
||||
#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)
|
||||
|
||||
#define PCIE_MSI_ADDR_LO 0x820
|
||||
#define PCIE_MSI_ADDR_HI 0x824
|
||||
|
@@ -150,6 +152,21 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
	return ret;
}

static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
			  PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
			  PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
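/*
 * Note: this helper consolidates the per-window viewport functions removed
 * further down (dw_pcie_prog_viewport_cfg0/cfg1/mem_outbound/io_outbound).
 * With it, iATU index 0 is shared for config and I/O accesses while iATU
 * index 1 is programmed once for MEM in dw_pcie_host_init(), for example:
 *
 *	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
 *				  PCIE_ATU_TYPE_MEM, pp->mem_mod_base,
 *				  pp->mem_bus_addr, pp->mem_size);
 */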
|
||||
|
||||
static struct irq_chip dw_msi_irq_chip = {
|
||||
.name = "PCI-MSI",
|
||||
.irq_enable = pci_msi_unmask_irq,
|
||||
|
@ -493,6 +510,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
|
|||
if (pp->ops->host_init)
|
||||
pp->ops->host_init(pp);
|
||||
|
||||
if (!pp->ops->rd_other_conf)
|
||||
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
|
||||
PCIE_ATU_TYPE_MEM, pp->mem_mod_base,
|
||||
pp->mem_bus_addr, pp->mem_size);
|
||||
|
||||
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
|
||||
|
||||
/* program correct class for RC */
|
||||
|
@ -515,115 +537,73 @@ int dw_pcie_host_init(struct pcie_port *pp)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
|
||||
{
|
||||
/* Program viewport 0 : OUTBOUND : CFG0 */
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
|
||||
PCIE_ATU_VIEWPORT);
|
||||
dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
|
||||
dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
|
||||
dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
|
||||
PCIE_ATU_LIMIT);
|
||||
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
|
||||
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1);
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
|
||||
}
|
||||
|
||||
static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
|
||||
{
|
||||
/* Program viewport 1 : OUTBOUND : CFG1 */
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
|
||||
PCIE_ATU_VIEWPORT);
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
|
||||
dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
|
||||
dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
|
||||
dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
|
||||
PCIE_ATU_LIMIT);
|
||||
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
|
||||
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
|
||||
}
|
||||
|
||||
static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
|
||||
{
|
||||
/* Program viewport 0 : OUTBOUND : MEM */
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
|
||||
PCIE_ATU_VIEWPORT);
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
|
||||
dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
|
||||
dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
|
||||
dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
|
||||
PCIE_ATU_LIMIT);
|
||||
dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
|
||||
dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
|
||||
PCIE_ATU_UPPER_TARGET);
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
|
||||
}
|
||||
|
||||
static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
|
||||
{
|
||||
/* Program viewport 1 : OUTBOUND : IO */
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
|
||||
PCIE_ATU_VIEWPORT);
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
|
||||
dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
|
||||
dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
|
||||
dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
|
||||
PCIE_ATU_LIMIT);
|
||||
dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
|
||||
dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
|
||||
PCIE_ATU_UPPER_TARGET);
|
||||
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
|
||||
}
|
||||
|
||||
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
|
||||
u32 devfn, int where, int size, u32 *val)
|
||||
{
|
||||
int ret = PCIBIOS_SUCCESSFUL;
|
||||
u32 address, busdev;
|
||||
int ret, type;
|
||||
u32 address, busdev, cfg_size;
|
||||
u64 cpu_addr;
|
||||
void __iomem *va_cfg_base;
|
||||
|
||||
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
|
||||
PCIE_ATU_FUNC(PCI_FUNC(devfn));
|
||||
address = where & ~0x3;
|
||||
|
||||
if (bus->parent->number == pp->root_bus_nr) {
|
||||
dw_pcie_prog_viewport_cfg0(pp, busdev);
|
||||
ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
|
||||
val);
|
||||
dw_pcie_prog_viewport_mem_outbound(pp);
|
||||
type = PCIE_ATU_TYPE_CFG0;
|
||||
cpu_addr = pp->cfg0_mod_base;
|
||||
cfg_size = pp->cfg0_size;
|
||||
va_cfg_base = pp->va_cfg0_base;
|
||||
} else {
|
||||
dw_pcie_prog_viewport_cfg1(pp, busdev);
|
||||
ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
|
||||
val);
|
||||
dw_pcie_prog_viewport_io_outbound(pp);
|
||||
type = PCIE_ATU_TYPE_CFG1;
|
||||
cpu_addr = pp->cfg1_mod_base;
|
||||
cfg_size = pp->cfg1_size;
|
||||
va_cfg_base = pp->va_cfg1_base;
|
||||
}
|
||||
|
||||
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
|
||||
type, cpu_addr,
|
||||
busdev, cfg_size);
|
||||
ret = dw_pcie_cfg_read(va_cfg_base + address, where, size, val);
|
||||
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
|
||||
PCIE_ATU_TYPE_IO, pp->io_mod_base,
|
||||
pp->io_bus_addr, pp->io_size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
|
||||
u32 devfn, int where, int size, u32 val)
|
||||
{
|
||||
int ret = PCIBIOS_SUCCESSFUL;
|
||||
u32 address, busdev;
|
||||
int ret, type;
|
||||
u32 address, busdev, cfg_size;
|
||||
u64 cpu_addr;
|
||||
void __iomem *va_cfg_base;
|
||||
|
||||
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
|
||||
PCIE_ATU_FUNC(PCI_FUNC(devfn));
|
||||
address = where & ~0x3;
|
||||
|
||||
if (bus->parent->number == pp->root_bus_nr) {
|
||||
dw_pcie_prog_viewport_cfg0(pp, busdev);
|
||||
ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
|
||||
val);
|
||||
dw_pcie_prog_viewport_mem_outbound(pp);
|
||||
type = PCIE_ATU_TYPE_CFG0;
|
||||
cpu_addr = pp->cfg0_mod_base;
|
||||
cfg_size = pp->cfg0_size;
|
||||
va_cfg_base = pp->va_cfg0_base;
|
||||
} else {
|
||||
dw_pcie_prog_viewport_cfg1(pp, busdev);
|
||||
ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
|
||||
val);
|
||||
dw_pcie_prog_viewport_io_outbound(pp);
|
||||
type = PCIE_ATU_TYPE_CFG1;
|
||||
cpu_addr = pp->cfg1_mod_base;
|
||||
cfg_size = pp->cfg1_size;
|
||||
va_cfg_base = pp->va_cfg1_base;
|
||||
}
|
||||
|
||||
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
|
||||
type, cpu_addr,
|
||||
busdev, cfg_size);
|
||||
ret = dw_pcie_cfg_write(va_cfg_base + address, where, size, val);
|
||||
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
|
||||
PCIE_ATU_TYPE_IO, pp->io_mod_base,
|
||||
pp->io_bus_addr, pp->io_size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -776,6 +756,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
|
|||
case 4:
|
||||
val |= PORT_LINK_MODE_4_LANES;
|
||||
break;
|
||||
case 8:
|
||||
val |= PORT_LINK_MODE_8_LANES;
|
||||
break;
|
||||
}
|
||||
dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
|
||||
|
||||
|
@ -792,6 +775,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
|
|||
case 4:
|
||||
val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
|
||||
break;
|
||||
case 8:
|
||||
val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
|
||||
break;
|
||||
}
|
||||
dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
|
||||
|
||||
|
|
|
@ -0,0 +1,110 @@
|
|||
/*
|
||||
* Copyright (C) 2015 Broadcom Corporation
|
||||
* Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/phy/phy.h>
|
||||
#include <linux/bcma/bcma.h>
|
||||
#include <linux/ioport.h>
|
||||
|
||||
#include "pcie-iproc.h"
|
||||
|
||||
|
||||
/* NS: CLASS field is R/O, and set to wrong 0x200 value */
|
||||
static void bcma_pcie2_fixup_class(struct pci_dev *dev)
|
||||
{
|
||||
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
|
||||
}
|
||||
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
|
||||
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
|
||||
|
||||
static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
|
||||
{
|
||||
struct pci_sys_data *sys = dev->sysdata;
|
||||
struct iproc_pcie *pcie = sys->private_data;
|
||||
struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev);
|
||||
|
||||
return bcma_core_irq(bdev, 5);
|
||||
}
|
||||
|
||||
static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
|
||||
{
|
||||
struct iproc_pcie *pcie;
|
||||
LIST_HEAD(res);
|
||||
struct resource res_mem;
|
||||
int ret;
|
||||
|
||||
pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
|
||||
if (!pcie)
|
||||
return -ENOMEM;
|
||||
|
||||
pcie->dev = &bdev->dev;
|
||||
bcma_set_drvdata(bdev, pcie);
|
||||
|
||||
pcie->base = bdev->io_addr;
|
||||
|
||||
res_mem.start = bdev->addr_s[0];
|
||||
res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
|
||||
res_mem.name = "PCIe MEM space";
|
||||
res_mem.flags = IORESOURCE_MEM;
|
||||
pci_add_resource(&res, &res_mem);
|
||||
|
||||
pcie->map_irq = iproc_pcie_bcma_map_irq;
|
||||
|
||||
ret = iproc_pcie_setup(pcie, &res);
|
||||
if (ret)
|
||||
dev_err(pcie->dev, "PCIe controller setup failed\n");
|
||||
|
||||
pci_free_resource_list(&res);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
|
||||
{
|
||||
struct iproc_pcie *pcie = bcma_get_drvdata(bdev);
|
||||
|
||||
iproc_pcie_remove(pcie);
|
||||
}
|
||||
|
||||
static const struct bcma_device_id iproc_pcie_bcma_table[] = {
|
||||
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS),
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table);
|
||||
|
||||
static struct bcma_driver iproc_pcie_bcma_driver = {
|
||||
.name = KBUILD_MODNAME,
|
||||
.id_table = iproc_pcie_bcma_table,
|
||||
.probe = iproc_pcie_bcma_probe,
|
||||
.remove = iproc_pcie_bcma_remove,
|
||||
};
|
||||
|
||||
static int __init iproc_pcie_bcma_init(void)
|
||||
{
|
||||
return bcma_driver_register(&iproc_pcie_bcma_driver);
|
||||
}
|
||||
module_init(iproc_pcie_bcma_init);
|
||||
|
||||
static void __exit iproc_pcie_bcma_exit(void)
|
||||
{
|
||||
bcma_driver_unregister(&iproc_pcie_bcma_driver);
|
||||
}
|
||||
module_exit(iproc_pcie_bcma_exit);
|
||||
|
||||
MODULE_AUTHOR("Hauke Mehrtens");
|
||||
MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver");
|
||||
MODULE_LICENSE("GPL v2");
|
|
@@ -69,15 +69,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
		return ret;
	}

	pcie->resources = &res;
	pcie->map_irq = of_irq_parse_and_map_pci;

	ret = iproc_pcie_setup(pcie);
	if (ret) {
	ret = iproc_pcie_setup(pcie, &res);
	if (ret)
		dev_err(pcie->dev, "PCIe controller setup failed\n");
		return ret;
	}

	return 0;
	pci_free_resource_list(&res);

	return ret;
}

static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
@@ -183,7 +183,7 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
	writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN);
}

int iproc_pcie_setup(struct iproc_pcie *pcie)
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
	int ret;
	struct pci_bus *bus;

@@ -211,7 +211,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie)
	pcie->sysdata.private_data = pcie;

	bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops,
				  &pcie->sysdata, pcie->resources);
				  &pcie->sysdata, res);
	if (!bus) {
		dev_err(pcie->dev, "unable to create PCI root bus\n");
		ret = -ENOMEM;

@@ -229,7 +229,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie)

	pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
	pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
	pci_bus_add_devices(bus);

	return 0;
@@ -29,14 +29,14 @@
struct iproc_pcie {
	struct device *dev;
	void __iomem *base;
	struct list_head *resources;
	struct pci_sys_data sysdata;
	struct pci_bus *root_bus;
	struct phy *phy;
	int irqs[IPROC_PCIE_MAX_NUM_IRQS];
	int (*map_irq)(const struct pci_dev *, u8, u8);
};

int iproc_pcie_setup(struct iproc_pcie *pcie);
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
int iproc_pcie_remove(struct iproc_pcie *pcie);

#endif /* _PCIE_IPROC_H */
@@ -146,10 +146,10 @@ struct pcie_app_reg {
static int spear13xx_pcie_establish_link(struct pcie_port *pp)
{
	u32 val;
	int count = 0;
	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
	unsigned int retries;

	if (dw_pcie_link_up(pp)) {
		dev_err(pp->dev, "link already up\n");

@@ -201,17 +201,16 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
			&app_reg->app_ctrl_0);

	/* check if the link is up or not */
	while (!dw_pcie_link_up(pp)) {
		mdelay(100);
		count++;
		if (count == 10) {
			dev_err(pp->dev, "link Fail\n");
			return -EINVAL;
	for (retries = 0; retries < 10; retries++) {
		if (dw_pcie_link_up(pp)) {
			dev_info(pp->dev, "link up\n");
			return 0;
		}
		mdelay(100);
	}
	dev_info(pp->dev, "link up\n");

	return 0;
	dev_err(pp->dev, "link Fail\n");
	return -EINVAL;
}

static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)