Merge branches 'pci/host-designware', 'pci/host-imx6', 'pci/host-keystone', 'pci/host-tegra' and 'pci/host-xilinx' into next
* pci/host-designware:
  PCI: designware: Fold struct pcie_port_info into struct pcie_port

* pci/host-imx6:
  PCI: imx6: Delay enabling reference clock for SS until it stabilizes

* pci/host-keystone:
  PCI: keystone: Set device ID based on SoC to support multiple ports
  PCI: keystone: Assume controller is already in RC mode
  PCI: keystone: Limit MRSS for all downstream devices

* pci/host-tegra:
  PCI: tegra: Add Tegra124 support
  PCI: tegra: Make sure the PCIe PLL is really reset
  PCI: tegra: Fix extended configuration space mapping
  PCI: tegra: Clear CLKREQ# enable on port disable

* pci/host-xilinx:
  PCI: xilinx: Fix xilinx_pcie_assign_msi() return value test
commit ef39ab79f7
@@ -1,7 +1,10 @@
NVIDIA Tegra PCIe controller

Required properties:
- compatible: "nvidia,tegra20-pcie" or "nvidia,tegra30-pcie"
- compatible: Must be one of:
  - "nvidia,tegra20-pcie"
  - "nvidia,tegra30-pcie"
  - "nvidia,tegra124-pcie"
- device_type: Must be "pci"
- reg: A list of physical base address and length for each set of controller
  registers. Must contain an entry for each entry in the reg-names property.
@@ -57,6 +60,11 @@ Required properties:
  - afi
  - pcie_x

Required properties on Tegra124 and later:
- phys: Must contain an entry for each entry in phy-names.
- phy-names: Must include the following entries:
  - pcie

Power supplies for Tegra20:
- avdd-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
- vdd-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
@@ -84,6 +92,21 @@ Power supplies for Tegra30:
- avdd-pexb-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
- vdd-pexb-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.

Power supplies for Tegra124:
- Required:
  - avddio-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
  - dvddio-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
  - avdd-pex-pll-supply: Power supply for dedicated (internal) PCIe PLL. Must
    supply 1.05 V.
  - hvdd-pex-supply: High-voltage supply for PCIe I/O and PCIe output clocks.
    Must supply 3.3 V.
  - hvdd-pex-pll-e-supply: High-voltage supply for PLLE (shared with USB3).
    Must supply 3.3 V.
  - vddio-pex-ctl-supply: Power supply for PCIe control I/O partition. Must
    supply 2.8-3.3 V.
  - avdd-pll-erefe-supply: Power supply for PLLE (shared with USB3). Must
    supply 1.05 V.

Root ports are defined as subnodes of the PCIe controller node.

Required properties:
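For illustration only (not part of this patch), a Tegra124 controller node using the new binding entries could look roughly like the sketch below; the unit address, phandles, regulator names and lane count are placeholders:

    pcie-controller@01003000 {
        compatible = "nvidia,tegra124-pcie";
        device_type = "pci";

        /* Tegra124 and later: PHY entries introduced by this update */
        phys = <&padctl 0>;             /* placeholder phandle/specifier */
        phy-names = "pcie";

        /* Tegra124 supplies from the list above; regulators are placeholders */
        avddio-pex-supply = <&vdd_1v05>;
        dvddio-pex-supply = <&vdd_1v05>;
        avdd-pex-pll-supply = <&vdd_1v05>;
        hvdd-pex-supply = <&vdd_3v3>;
        hvdd-pex-pll-e-supply = <&vdd_3v3>;
        vddio-pex-ctl-supply = <&vdd_3v3>;
        avdd-pll-erefe-supply = <&vdd_1v05>;

        /* Root ports are subnodes of the controller node */
        pci@1,0 {
            device_type = "pci";
            nvidia,num-lanes = <2>;     /* assumed lane-count property; see the driver's DT parsing */
        };
    };
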
@@ -13,9 +13,7 @@ Required Properties:-

compatibility: "ti,keystone-pcie"
reg: index 1 is the base address and length of DW application registers.
     index 2 is the base address and length of PCI mode configuration
     register.
     index 3 is the base address and length of PCI device ID register.
     index 2 is the base address and length of PCI device ID register.

pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
	interrupt-cells: should be set to 1

@@ -63,6 +61,3 @@ Designware DT Properties not applicable for Keystone PCI

1. pcie_bus clock-names not used. Instead, a phandle to phys is used.

Note for PCI driver usage
=========================
Driver requires pci=pcie_bus_perf in the bootargs for proper functioning.
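As a rough sketch of the updated reg layout described above (not part of this patch; the addresses and sizes are placeholders):

    pcie@21800000 {
        compatible = "ti,keystone-pcie";
        reg = <0x21801000 0x2000>,      /* index 1: DW application registers (placeholder) */
              <0x21802000 0x0004>;      /* index 2: PCI device ID register (placeholder) */
        /* remaining properties (ranges, interrupts, pcie_msi_intc, ...) omitted */
    };
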
@@ -257,11 +257,6 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
int ret;

regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);

ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_phy clock\n");
@@ -283,6 +278,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
/* allow the clocks to stabilize */
usleep_range(200, 500);

/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);

/* Some boards don't have PCIe reset GPIO. */
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
gpio_set_value(imx6_pcie->reset_gpio, 0);

@@ -35,15 +35,56 @@
#define MAX_MSI_HOST_IRQS 8
#define MAX_LEGACY_HOST_IRQS 4

/* RC mode settings masks */
#define PCIE_RC_MODE BIT(2)
#define PCIE_MODE_MASK (BIT(1) | BIT(2))

/* DEV_STAT_CTRL */
#define PCIE_CAP_BASE 0x70

/* PCIE controller device IDs */
#define PCIE_RC_K2HK 0xb008
#define PCIE_RC_K2E 0xb009
#define PCIE_RC_K2L 0xb00a

#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)

static void quirk_limit_mrrs(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
struct pci_dev *bridge = bus->self;
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};

if (pci_is_root_bus(bus))
return;

/* look for the host bridge */
while (!pci_is_root_bus(bus)) {
bridge = bus->self;
bus = bus->parent;
}

if (bridge) {
/*
* Keystone PCI controller has a h/w limitation of
* 256 bytes maximum read request size. It can't handle
* anything higher than this. So force this limit on
* all downstream devices.
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
dev_info(&dev->dev, "limiting MRRS to 256\n");
pcie_set_readrq(dev, 256);
}
}
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);

static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
@@ -212,8 +253,8 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,

static void __init ks_pcie_host_init(struct pcie_port *pp)
{
u32 vendor_device_id, val;
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
u32 val;

ks_pcie_establish_link(ks_pcie);
ks_dw_pcie_setup_rc_app_regs(ks_pcie);
@@ -222,8 +263,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
pp->dbi_base + PCI_IO_BASE);

/* update the Vendor ID */
vendor_device_id = readl(ks_pcie->va_reg_pciid);
writew((vendor_device_id >> 16), pp->dbi_base + PCI_DEVICE_ID);
writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);

/* update the DEV_STAT_CTRL to publish right mrrs */
val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
@@ -310,7 +350,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
void __iomem *reg_p;
struct phy *phy;
int ret = 0;
u32 val;

ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
GFP_KERNEL);
@@ -320,18 +359,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
}
pp = &ks_pcie->pp;

/* index 2 is the devcfg register for RC mode settings */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
reg_p = devm_ioremap_resource(dev, res);
if (IS_ERR(reg_p))
return PTR_ERR(reg_p);

/* enable RC mode in devcfg */
val = readl(reg_p);
val &= ~PCIE_MODE_MASK;
val |= PCIE_RC_MODE;
writel(val, reg_p);

/* initialize SerDes Phy if present */
phy = devm_phy_get(dev, "pcie-phy");
if (!IS_ERR_OR_NULL(phy)) {
@@ -340,12 +367,14 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
return ret;
}

/* index 3 is to read PCI DEVICE_ID */
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
/* index 2 is to read PCI DEVICE_ID */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
reg_p = devm_ioremap_resource(dev, res);
if (IS_ERR(reg_p))
return PTR_ERR(reg_p);
ks_pcie->va_reg_pciid = reg_p;
ks_pcie->device_id = readl(reg_p) >> 16;
devm_iounmap(dev, reg_p);
devm_release_mem_region(dev, res->start, resource_size(res));

pp->dev = dev;
platform_set_drvdata(pdev, ks_pcie);

@@ -19,8 +19,8 @@
struct keystone_pcie {
struct clk *clk;
struct pcie_port pp;
void __iomem *va_reg_pciid;

/* PCI Device ID */
u32 device_id;
int num_legacy_host_irqs;
int legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
struct device_node *legacy_intc_np;

@@ -38,6 +38,7 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -115,13 +116,20 @@

#define AFI_INTR_CODE 0xb8
#define AFI_INTR_CODE_MASK 0xf
#define AFI_INTR_AXI_SLAVE_ERROR 1
#define AFI_INTR_AXI_DECODE_ERROR 2
#define AFI_INTR_INI_SLAVE_ERROR 1
#define AFI_INTR_INI_DECODE_ERROR 2
#define AFI_INTR_TARGET_ABORT 3
#define AFI_INTR_MASTER_ABORT 4
#define AFI_INTR_INVALID_WRITE 5
#define AFI_INTR_LEGACY 6
#define AFI_INTR_FPCI_DECODE_ERROR 7
#define AFI_INTR_AXI_DECODE_ERROR 8
#define AFI_INTR_FPCI_TIMEOUT 9
#define AFI_INTR_PE_PRSNT_SENSE 10
#define AFI_INTR_PE_CLKREQ_SENSE 11
#define AFI_INTR_CLKCLAMP_SENSE 12
#define AFI_INTR_RDY4PD_SENSE 13
#define AFI_INTR_P2P_ERROR 14

#define AFI_INTR_SIGNATURE 0xbc
#define AFI_UPPER_FPCI_ADDRESS 0xc0
@@ -152,8 +160,10 @@
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)

#define AFI_FUSE 0x104
@@ -165,12 +175,21 @@
#define AFI_PEX_CTRL_RST (1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)

#define AFI_PLLE_CONTROL 0x160
#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0 0x168

#define RP_VEND_XP 0x00000F00
#define RP_VEND_XP_DL_UP (1 << 30)

#define RP_PRIV_MISC 0x00000FE0
#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)

#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
@@ -197,6 +216,7 @@

#define PADS_REFCLK_CFG0 0x000000C8
#define PADS_REFCLK_CFG1 0x000000CC
#define PADS_REFCLK_BIAS 0x000000D0

/*
* Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
@@ -236,6 +256,7 @@ struct tegra_pcie_soc_data {
bool has_pex_bias_ctrl;
bool has_intr_prsnt_sense;
bool has_cml_clk;
bool has_gen2;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
@@ -268,6 +289,8 @@ struct tegra_pcie {
struct reset_control *afi_rst;
struct reset_control *pcie_xrst;

struct phy *phy;

struct tegra_msi msi;

struct list_head ports;
@@ -383,7 +406,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
for (i = 0; i < 16; i++) {
unsigned long virt = (unsigned long)bus->area->addr +
i * SZ_64K;
phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;
phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
if (err < 0) {
@@ -562,6 +585,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
if (soc->has_pex_clkreq_en)
value |= AFI_PEX_CTRL_CLKREQ_EN;

value |= AFI_PEX_CTRL_OVERRIDE_EN;

afi_writel(port->pcie, value, ctrl);

tegra_pcie_port_reset(port);
@@ -569,6 +594,7 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
unsigned long value;

@@ -579,6 +605,10 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)

/* disable reference clock */
value = afi_readl(port->pcie, ctrl);

if (soc->has_pex_clkreq_en)
value &= ~AFI_PEX_CTRL_CLKREQ_EN;

value &= ~AFI_PEX_CTRL_REFCLK_EN;
afi_writel(port->pcie, value, ctrl);
}
@@ -694,9 +724,15 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
"Target abort",
"Master abort",
"Invalid write",
"Legacy interrupt",
"Response decoding error",
"AXI response decoding error",
"Transaction timeout",
"Slot present pin change",
"Slot clock request change",
"TMS clock ramp change",
"TMS ready for power down",
"Peer2Peer error",
};
struct tegra_pcie *pcie = arg;
u32 code, signature;
@@ -802,30 +838,27 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
struct tegra_pcie_port *port;
unsigned int timeout;
unsigned long value;
u32 value;

/* power down PCIe slot clock bias pad */
if (soc->has_pex_bias_ctrl)
afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
timeout = jiffies + msecs_to_jiffies(timeout);

/* configure mode and disable all ports */
value = afi_readl(pcie, AFI_PCIE_CONFIG);
value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
while (time_before(jiffies, timeout)) {
value = pads_readl(pcie, soc->pads_pll_ctl);
if (value & PADS_PLL_CTL_LOCKDET)
return 0;
}

list_for_each_entry(port, &pcie->ports, list)
value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
return -ETIMEDOUT;
}

afi_writel(pcie, value, AFI_PCIE_CONFIG);

value = afi_readl(pcie, AFI_FUSE);
value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
afi_writel(pcie, value, AFI_FUSE);
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
u32 value;
int err;

/* initialize internal PHY, enable up to 16 PCIE lanes */
pads_writel(pcie, 0x0, PADS_CTL_SEL);
@@ -844,6 +877,13 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
pads_writel(pcie, value, soc->pads_pll_ctl);

/* reset PLL */
value = pads_readl(pcie, soc->pads_pll_ctl);
value &= ~PADS_PLL_CTL_RST_B4SM;
pads_writel(pcie, value, soc->pads_pll_ctl);

usleep_range(20, 100);

/* take PLL out of reset */
value = pads_readl(pcie, soc->pads_pll_ctl);
value |= PADS_PLL_CTL_RST_B4SM;
@@ -856,15 +896,11 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

/* wait for the PLL to lock */
timeout = 300;
do {
value = pads_readl(pcie, soc->pads_pll_ctl);
usleep_range(1000, 2000);
if (--timeout == 0) {
pr_err("Tegra PCIe error: timeout waiting for PLL\n");
return -EBUSY;
}
} while (!(value & PADS_PLL_CTL_LOCKDET));
err = tegra_pcie_pll_wait(pcie, 500);
if (err < 0) {
dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
return err;
}

/* turn off IDDQ override */
value = pads_readl(pcie, PADS_CTL);
@@ -876,6 +912,58 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
pads_writel(pcie, value, PADS_CTL);

return 0;
}

static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
struct tegra_pcie_port *port;
unsigned long value;
int err;

/* enable PLL power down */
if (pcie->phy) {
value = afi_readl(pcie, AFI_PLLE_CONTROL);
value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
afi_writel(pcie, value, AFI_PLLE_CONTROL);
}

/* power down PCIe slot clock bias pad */
if (soc->has_pex_bias_ctrl)
afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

/* configure mode and disable all ports */
value = afi_readl(pcie, AFI_PCIE_CONFIG);
value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

list_for_each_entry(port, &pcie->ports, list)
value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

afi_writel(pcie, value, AFI_PCIE_CONFIG);

if (soc->has_gen2) {
value = afi_readl(pcie, AFI_FUSE);
value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
afi_writel(pcie, value, AFI_FUSE);
} else {
value = afi_readl(pcie, AFI_FUSE);
value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
afi_writel(pcie, value, AFI_FUSE);
}

if (!pcie->phy)
err = tegra_pcie_phy_enable(pcie);
else
err = phy_power_on(pcie->phy);

if (err < 0) {
dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
return err;
}

/* take the PCIe interface module out of reset */
reset_control_deassert(pcie->pcie_xrst);

@@ -909,6 +997,10 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)

/* TODO: disable and unprepare clocks? */

err = phy_power_off(pcie->phy);
if (err < 0)
dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);

reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
reset_control_assert(pcie->pex_rst);
@@ -1030,6 +1122,19 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
return err;
}

pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
if (IS_ERR(pcie->phy)) {
err = PTR_ERR(pcie->phy);
dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
return err;
}

err = phy_init(pcie->phy);
if (err < 0) {
dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
return err;
}

err = tegra_pcie_power_on(pcie);
if (err) {
dev_err(&pdev->dev, "failed to power up: %d\n", err);
@@ -1088,10 +1193,17 @@ poweroff:

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
int err;

if (pcie->irq > 0)
free_irq(pcie->irq, pcie);

tegra_pcie_power_off(pcie);

err = phy_exit(pcie->phy);
if (err < 0)
dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);

return 0;
}

@@ -1341,7 +1453,19 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
{
struct device_node *np = pcie->dev->of_node;

if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(pcie->dev, "4x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
return 0;

case 0x0000102:
dev_info(pcie->dev, "2x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
switch (lanes) {
case 0x00000204:
dev_info(pcie->dev, "4x1, 2x1 configuration\n");
@@ -1449,7 +1573,23 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
struct device_node *np = pcie->dev->of_node;
unsigned int i = 0;

if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;

pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
return -ENOMEM;

pcie->supplies[i++].supply = "avddio-pex";
pcie->supplies[i++].supply = "dvddio-pex";
pcie->supplies[i++].supply = "avdd-pex-pll";
pcie->supplies[i++].supply = "hvdd-pex";
pcie->supplies[i++].supply = "hvdd-pex-pll-e";
pcie->supplies[i++].supply = "vddio-pex-ctl";
pcie->supplies[i++].supply = "avdd-pll-erefe";
} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
bool need_pexa = false, need_pexb = false;

/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
@@ -1671,6 +1811,12 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
unsigned int retries = 3;
unsigned long value;

/* override presence detection */
value = readl(port->base + RP_PRIV_MISC);
value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
writel(value, port->base + RP_PRIV_MISC);

do {
unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

@@ -1751,6 +1897,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
.has_pex_bias_ctrl = false,
.has_intr_prsnt_sense = false,
.has_cml_clk = false,
.has_gen2 = false,
};

static const struct tegra_pcie_soc_data tegra30_pcie_data = {
@@ -1762,9 +1909,23 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
.has_gen2 = false,
};

static const struct tegra_pcie_soc_data tegra124_pcie_data = {
.num_ports = 2,
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
.has_gen2 = true,
};

static const struct of_device_id tegra_pcie_of_match[] = {
{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
{ },

@@ -423,16 +423,16 @@ int __init dw_pcie_host_init(struct pcie_port *pp)

cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
pp->config.cfg0_size = resource_size(cfg_res)/2;
pp->config.cfg1_size = resource_size(cfg_res)/2;
pp->cfg0_size = resource_size(cfg_res)/2;
pp->cfg1_size = resource_size(cfg_res)/2;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->config.cfg0_size;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;

/* Find the untranslated configuration space address */
index = of_property_match_string(np, "reg-names", "config");
addrp = of_get_address(np, index, false, false);
pp->cfg0_mod_base = of_read_number(addrp, ns);
pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size;
pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
} else {
dev_err(pp->dev, "missing *config* reg space\n");
}
@@ -455,8 +455,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
IO_SPACE_LIMIT,
range.pci_addr + range.size
+ global_io_offset);
pp->config.io_size = resource_size(&pp->io);
pp->config.io_bus_addr = range.pci_addr;
pp->io_size = resource_size(&pp->io);
pp->io_bus_addr = range.pci_addr;
pp->io_base = range.cpu_addr;

/* Find the untranslated IO space address */
@@ -466,8 +466,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
if (restype == IORESOURCE_MEM) {
of_pci_range_to_resource(&range, np, &pp->mem);
pp->mem.name = "MEM";
pp->config.mem_size = resource_size(&pp->mem);
pp->config.mem_bus_addr = range.pci_addr;
pp->mem_size = resource_size(&pp->mem);
pp->mem_bus_addr = range.pci_addr;

/* Find the untranslated MEM space address */
pp->mem_mod_base = of_read_number(parser.range -
@@ -475,16 +475,16 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
}
if (restype == 0) {
of_pci_range_to_resource(&range, np, &pp->cfg);
pp->config.cfg0_size = resource_size(&pp->cfg)/2;
pp->config.cfg1_size = resource_size(&pp->cfg)/2;
pp->cfg0_size = resource_size(&pp->cfg)/2;
pp->cfg1_size = resource_size(&pp->cfg)/2;
pp->cfg0_base = pp->cfg.start;
pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
pp->cfg1_base = pp->cfg.start + pp->cfg0_size;

/* Find the untranslated configuration space address */
pp->cfg0_mod_base = of_read_number(parser.range -
parser.np + na, ns);
pp->cfg1_mod_base = pp->cfg0_mod_base +
pp->config.cfg0_size;
pp->cfg0_size;
}
}

@@ -512,7 +512,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
if (!pp->va_cfg0_base) {
pp->cfg0_base = pp->cfg.start;
pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
pp->config.cfg0_size);
pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(pp->dev, "error with ioremap in function\n");
return -ENOMEM;
@@ -520,9 +520,9 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
}

if (!pp->va_cfg1_base) {
pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
pp->config.cfg1_size);
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(pp->dev, "error with ioremap\n");
return -ENOMEM;
@@ -583,7 +583,7 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1,
dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -599,7 +599,7 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1,
dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -614,10 +614,10 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1,
dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
PCIE_ATU_UPPER_TARGET);
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
@@ -630,10 +630,10 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1,
dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
PCIE_ATU_UPPER_TARGET);
dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
@@ -768,15 +768,15 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)

pp = sys_to_pcie(sys);

if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
sys->io_offset = global_io_offset - pp->config.io_bus_addr;
if (global_io_offset < SZ_1M && pp->io_size > 0) {
sys->io_offset = global_io_offset - pp->io_bus_addr;
pci_ioremap_io(global_io_offset, pp->io_base);
global_io_offset += SZ_64K;
pci_add_resource_offset(&sys->resources, &pp->io,
sys->io_offset);
}

sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
pci_add_resource(&sys->resources, &pp->busn);

@@ -833,7 +833,6 @@ static struct hw_pci dw_pci = {

void dw_pcie_setup_rc(struct pcie_port *pp)
{
struct pcie_port_info *config = &pp->config;
u32 val;
u32 membase;
u32 memlimit;
@@ -888,7 +887,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)

/* setup memory base, memory limit */
membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
val = memlimit | membase;
dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);

@@ -14,15 +14,6 @@
#ifndef _PCIE_DESIGNWARE_H
#define _PCIE_DESIGNWARE_H

struct pcie_port_info {
u32 cfg0_size;
u32 cfg1_size;
u32 io_size;
u32 mem_size;
phys_addr_t io_bus_addr;
phys_addr_t mem_bus_addr;
};

/*
* Maximum number of MSI IRQs can be 256 per controller. But keep
* it 32 as of now. Probably we will never need more than 32. If needed,
@@ -38,18 +29,23 @@ struct pcie_port {
u64 cfg0_base;
u64 cfg0_mod_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
u64 cfg1_base;
u64 cfg1_mod_base;
void __iomem *va_cfg1_base;
u32 cfg1_size;
u64 io_base;
u64 io_mod_base;
phys_addr_t io_bus_addr;
u32 io_size;
u64 mem_base;
u64 mem_mod_base;
phys_addr_t mem_bus_addr;
u32 mem_size;
struct resource cfg;
struct resource io;
struct resource mem;
struct resource busn;
struct pcie_port_info config;
int irq;
u32 lanes;
struct pcie_host_ops *ops;

@@ -359,8 +359,8 @@ static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
phys_addr_t msg_addr;

hwirq = xilinx_pcie_assign_msi(port);
if (irq < 0)
return irq;
if (hwirq < 0)
return hwirq;

irq = irq_create_mapping(port->irq_domain, hwirq);
if (!irq)