// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/kernel.h>

#include "pcie-cadence.h"

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

	if (nbits < 8)
		nbits = 8;
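	/*
	 * Illustrative example (not part of the original comments): a
	 * 0x1200-byte region rounds up to sz = 0x2000, so nbits = 13; any
	 * size below 256 bytes ends up with the minimum of nbits = 8.
	 */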

	/* Set the PCI address */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Set the PCIe header descriptor */
	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Whether or not Bit [23] is set in the DESC0 register of the outbound
	 * PCIe descriptor, the PCI function number must be set into
	 * Bits [26:24] of DESC0 anyway.
	 *
	 * In Root Complex mode, the function number is always 0 but in Endpoint
	 * mode, the PCIe controller may support more than one function. This
	 * function number needs to be set properly into the outbound PCIe
	 * descriptor.
	 *
	 * Besides, setting Bit [23] is mandatory when in Root Complex mode:
	 * the driver must then provide the bus number in Bits [7:0] of DESC1
	 * and the device number in Bits [31:27] of DESC0. Like the function
	 * number, the device number is always 0 in Root Complex mode.
	 *
	 * However, when in Endpoint mode, we can clear Bit [23] of DESC0 so
	 * that the PCIe controller uses the captured values for the bus and
	 * device numbers.
	 */
	if (pcie->is_rc) {
		/* The device and function numbers are always 0. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
	} else {
		/*
		 * Use captured values for bus and device numbers but still
		 * need to set the function number.
		 */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Set the CPU address */
	cpu_addr -= pcie->mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
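
/*
 * Note: this helper programs outbound region r to generate Normal Message
 * TLPs (CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG) rather than MEM or IO
 * accesses: the PCI address registers are cleared and only the CPU-side
 * address translation is programmed, with a fixed NBITS value of 17.
 */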
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/* See cdns_pcie_set_outbound_region() comments above. */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	/* Set the CPU address */
	cpu_addr -= pcie->mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
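
/*
 * Clear all the address translation registers of outbound region r, which
 * effectively disables the region until it is programmed again.
 */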
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
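
/* Power off and exit every PHY attached to the controller, in reverse order. */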
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
	int i = pcie->phy_count;

	while (i--) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}
}
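
/*
 * Initialize and power on each PHY in turn. If any step fails, the PHYs that
 * were already brought up are powered off and exited again before the error
 * is returned.
 */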
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
	int ret;
	int i;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(pcie->phy[i]);
		if (ret < 0) {
			phy_exit(pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}

	return ret;
}
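
/*
 * Look up the PHYs listed in the "phy-names" DT property, add stateless
 * device links to the PHY devices and bring the PHYs up through
 * cdns_pcie_enable_phy(). A missing "phy-names" property is not treated as
 * an error: the function records a PHY count of zero and returns 0.
 */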
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
	struct device_node *np = dev->of_node;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	int i;
	int ret;
	const char *name;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 1) {
		dev_err(dev, "no phy-names. PHY will not be initialized\n");
		pcie->phy_count = 0;
		return 0;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		of_property_read_string_index(np, "phy-names", i, &name);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_phy;
		}
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			devm_phy_put(dev, phy[i]);
			ret = -EINVAL;
			goto err_phy;
		}
	}

	pcie->phy_count = phy_count;
	pcie->phy = phy;
	pcie->link = link;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret)
		goto err_phy;

	return 0;

err_phy:
	while (--i >= 0) {
		device_link_del(link[i]);
		devm_phy_put(dev, phy[i]);
	}

	return ret;
}
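
/*
 * System sleep support (noirq phase): the PHYs are powered down on suspend
 * and re-enabled on resume.
 */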
#ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);

	cdns_pcie_disable_phy(pcie);

	return 0;
}

static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

const struct dev_pm_ops cdns_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				      cdns_pcie_resume_noirq)
};