// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"

#define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs	*/
#define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR	*/
#define PNV_IODA1_DMA32_SEGSIZE	0x10000000
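/*
 * 16 M64 BARs x 8 segments gives 128 M64 segments, matching the maximum
 * number of IODA1 PEs; each DMA32 segment spans 0x10000000 bytes (256MB).
 */
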
static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
					      "NPU_OCAPI" };

static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);

void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV*/

	printk("%spci %s: [PE# %.2x] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

static bool pnv_iommu_bypass_disabled __read_mostly;
static bool pci_reset_phbs __read_mostly;

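/*
 * Boot-time knobs: "iommu=nobypass" on the kernel command line disables the
 * IOMMU bypass window on PowerNV, and "ppc_pci_reset_phbs" sets the
 * pci_reset_phbs flag; both are wired up via the early_param() hooks below.
 */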
static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);

static int __init pci_reset_phbs_setup(char *str)
{
	pci_reset_phbs = true;
	return 0;
}

early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup);

static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
{
	/*
	 * WARNING: We cannot rely on the resource flags. The Linux PCI
	 * allocation code sometimes decides to put a 64-bit prefetchable
	 * BAR in the 32-bit window, so we have to compare the addresses.
	 *
	 * For simplicity we only test resource start.
	 */
	return (r->start >= phb->ioda.m64_base &&
		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
}

static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
{
	unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);

	return (resource_flags & flags) == flags;
}

static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
	s64 rc;

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;

	/*
	 * Clear the PE frozen state as it might be put into frozen state
	 * in the last PCI remove path. It's not harmful to do so when the
	 * PE is already in unfrozen state.
	 */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);

	return &phb->ioda.pe_array[pe_no];
}

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
		pr_warn("%s: Invalid PE %x on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
		pr_debug("%s: PE %x was reserved on PHB#%x\n",
			 __func__, pe_no, phb->hose->global_number);

	pnv_ioda_init_pe(phb, pe_no);
}

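/*
 * Allocate a PE number by scanning the PE bitmap from the highest number
 * down; returns NULL when no PE is free.
 */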
static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	long pe;

	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
		if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
			return pnv_ioda_init_pe(phb, pe);
	}

	return NULL;
}

static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	unsigned int pe_num = pe->pe_number;

	WARN_ON(pe->pdev);
	WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */
	kfree(pe->npucomp);
	memset(pe, 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe_num, phb->ioda.pe_alloc);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/*
	 * Exclude the segments for reserved and root bus PE, which
	 * are first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		pr_warn("  Cannot strip M64 segment for reserved PE#%x\n",
			phb->ioda.reserved_pe_idx);

	return 0;

fail:
	pr_warn("  Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}

static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
					 unsigned long *pe_bitmap)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct resource *r;
	resource_size_t base, sgsz, start, end;
	int segno, i;

	base = phb->ioda.m64_base;
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
		if (!r->parent || !pnv_pci_is_m64(phb, r))
			continue;

		start = _ALIGN_DOWN(r->start - base, sgsz);
		end = _ALIGN_UP(r->end - base, sgsz);
		for (segno = start / sgsz; segno < end / sgsz; segno++) {
			if (pe_bitmap)
				set_bit(segno, pe_bitmap);
			else
				pnv_ioda_reserve_pe(phb, segno);
		}
	}
}

static int pnv_ioda1_init_m64(struct pnv_phb *phb)
{
	struct resource *r;
	int index;

	/*
	 * There are 16 M64 BARs, each of which has 8 segments. So
	 * there are as many M64 segments as the maximum number of
	 * PEs, which is 128.
	 */
	for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
		unsigned long base, segsz = phb->ioda.m64_segsize;
		int64_t rc;

		base = phb->ioda.m64_base +
		       index * PNV_IODA1_M64_SEGS * segsz;
		rc = opal_pci_set_phb_mem_window(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index, base, 0,
				PNV_IODA1_M64_SEGS * segsz);
		if (rc != OPAL_SUCCESS) {
			pr_warn("  Error %lld setting M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}

		rc = opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index,
				OPAL_ENABLE_M64_SPLIT);
		if (rc != OPAL_SUCCESS) {
			pr_warn("  Error %lld enabling M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}
	}

	/*
	 * Exclude the segments for reserved and root bus PE, which
	 * are first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
		     phb->ioda.reserved_pe_idx, phb->hose->global_number);

	return 0;

fail:
	for ( ; index >= 0; index--)
		opal_pci_phb_mmio_enable(phb->opal_id,
			OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);

	return -EIO;
}

static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
				    unsigned long *pe_bitmap,
				    bool all)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list) {
		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);

		if (all && pdev->subordinate)
			pnv_ioda_reserve_m64_pe(pdev->subordinate,
						pe_bitmap, all);
	}
}

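/*
 * Pick the PE that will own the bus's M64 segments: mark the segments used
 * by devices under the bus, make the first such PE the master and chain the
 * remaining ones onto its slave list, forming a compound PE.
 */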
static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	int i;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return NULL;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return NULL;
	}

	/* Figure out reserved PE numbers by the PE */
	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * The current bus might not own an M64 window; it may all be
	 * contributed by its child buses. In that case, we needn't
	 * pick an M64 dependent PE#.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
		kfree(pe_alloc);
		return NULL;
	}

	/*
	 * Figure out the master PE and put all slave PEs to master
	 * PE's list to form compound PE.
	 */
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
		phb->ioda.total_pe_num) {
		pe = &phb->ioda.pe_array[i];

		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}

		/*
		 * P7IOC supports M64DT, which helps mapping M64 segment
		 * to one particular PE#. However, PHB3 has fixed mapping
		 * between M64 segment and PE#. In order to have same logic
		 * for P7IOC and PHB3, we enforce fixed mapping between M64
		 * segment and PE# on P7IOC.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			int64_t rc;

			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M64_WINDOW_TYPE,
					pe->pe_number / PNV_IODA1_M64_SEGS,
					pe->pe_number % PNV_IODA1_M64_SEGS);
			if (rc != OPAL_SUCCESS)
				pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
					__func__, rc, phb->hose->global_number,
					pe->pe_number);
		}
	}

	kfree(pe_alloc);
	return master_pe;
}

static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	u32 m64_range[2], i;
	const __be32 *r;
	u64 pci_addr;

	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
		pr_info("  M64 window not supported\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_info("  Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info("  No <ibm,opal-m64-window> on %pOF\n",
			dn);
		return;
	}

	/*
	 * Find the available M64 BAR range and pickup the last one for
	 * covering the whole 64-bits space. We support only one range.
	 */
	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
				       m64_range, 2)) {
		/* In absence of the property, assume 0..15 */
		m64_range[0] = 0;
		m64_range[1] = 16;
	}
	/* We only support 64 bits in our allocator */
	if (m64_range[1] > 63) {
		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
			__func__, m64_range[1], phb->hose->global_number);
		m64_range[1] = 63;
	}
	/* Empty range, no m64 */
	if (m64_range[1] <= m64_range[0]) {
		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
			__func__, phb->hose->global_number);
		return;
	}

	/* Configure M64 information */
	res = &hose->mem_resources[1];
	res->name = dn->full_name;
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
	phb->ioda.m64_base = pci_addr;

	/* This lines up nicely with the display from processing OF ranges */
	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
		res->start, res->end, pci_addr, m64_range[0],
		m64_range[0] + m64_range[1] - 1);

	/* Mark all M64 used up by default */
	phb->ioda.m64_bar_alloc = (unsigned long)-1;

	/* Use last M64 BAR to cover M64 window */
	m64_range[1]--;
	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];

	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);

	/* Mark remaining ones free */
	for (i = m64_range[0]; i < m64_range[1]; i++)
		clear_bit(i, &phb->ioda.m64_bar_alloc);

	/*
	 * Setup init functions for M64 based on IODA version, IODA3 uses
	 * the IODA2 code.
	 */
	if (phb->type == PNV_PHB_IODA1)
		phb->init_m64 = pnv_ioda1_init_m64;
	else
		phb->init_m64 = pnv_ioda2_init_m64;
}

static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number,
					       opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate = 0, state;
	__be16 pcierr = 0;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; the PE instance might not be
	 * initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting "
			"PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PE */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting "
				"PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}

static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear PE frozen state. If it's a master PE, we need to
	 * clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
							  slave->pe_number,
							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well. Otherwise, an error
	 * originating from the PE might contribute to other
	 * PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}

static void pnv_ioda_unset_peltv(struct pnv_phb *phb,
				 struct pnv_ioda_pe *pe,
				 struct pci_dev *parent)
{
	int64_t rc;

	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number,
						OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
}

static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently, we just deconfigure VF PEs. Bus PEs will always be there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = resource_size(&pe->pbus->busn_res);
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;         break;
		case  2: bcomp = OpalPciBus7Bits;       break;
		case  4: bcomp = OpalPciBus6Bits;       break;
		case  8: bcomp = OpalPciBus5Bits;       break;
		case 16: bcomp = OpalPciBus4Bits;       break;
		case 32: bcomp = OpalPciBus3Bits;       break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;

	/*
	 * Release from all parents' PELT-V. NPUs don't have a PELTV
	 * table.
	 */
	if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
		pnv_ioda_unset_peltv(phb, pe, parent);

	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
#ifdef CONFIG_PCI_IOV
	pe->parent_dev = NULL;
#endif

	return 0;
}

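/*
 * Program a PE into the PHB: map its RID range with opal_pci_set_pe(),
 * set up the PELTV (except on NPUs, which have none), fill the RID
 * reverse map, and on IODA1 also set up and enable an MVE.
 */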
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = resource_size(&pe->pbus->busn_res);
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif /* CONFIG_PCI_IOV */
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well. Otherwise, an error
	 * originating from the PE might contribute to other
	 * PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
	if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
		pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

#ifdef CONFIG_PCI_IOV
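/*
 * Shift the PF's IOV BARs by @offset VF-sized segments so that each VF's
 * BAR lands in the M64 segment, and therefore the PE, assigned to that VF.
 */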
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct pci_dn *pdn = pci_get_pdn(dev);
	int i;
	struct resource *res, res2;
	resource_size_t size;
	u16 num_vfs;

	if (!dev->is_physfn)
		return -EINVAL;

	/*
	 * "offset" is in VFs.  The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number.  Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs BAR.  This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * Since M64 BAR shares segments among all possible 256 PEs,
	 * we have to shift the beginning of PF IOV BAR to make it start from
	 * the segment which belongs to the PE number assigned to the first VF.
	 * This creates a "hole" in the /proc/iomem which could be used for
	 * allocating other resources so we reserve this area below and
	 * release when IOV is released.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
			 i, &res2, res, (offset > 0) ? "En" : "Dis",
			 num_vfs, offset);

		if (offset < 0) {
			devm_release_resource(&dev->dev, &pdn->holes[i]);
			memset(&pdn->holes[i], 0, sizeof(pdn->holes[i]));
		}

		pci_update_resource(dev, i + PCI_IOV_RESOURCES);

		if (offset > 0) {
			pdn->holes[i].start = res2.start;
			pdn->holes[i].end = res2.start + size * offset - 1;
			pdn->holes[i].flags = IORESOURCE_BUS;
			pdn->holes[i].name = "pnv_iov_reserved";
			devm_request_resource(&dev->dev, res->parent,
					      &pdn->holes[i]);
		}
	}
	return 0;
}
#endif /* CONFIG_PCI_IOV */

static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
			   pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	pe = pnv_ioda_alloc_pe(phb);
	if (!pe) {
		pr_warn("%s: Not enough PE# available, disabling device\n",
			pci_name(dev));
		return NULL;
	}

	/* NOTE: We don't get a reference for the pointer in the PE
	 * data structure, both the device and PE structures should be
	 * destroyed at the same time. However, removing nvlink
	 * devices will need some work.
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pdn->pe_number = pe->pe_number;
	pe->flags = PNV_IODA_PE_DEV;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;
	pe->device_count++;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		return NULL;
	}

	/* Put PE to the list */
	mutex_lock(&phb->ioda.pe_list_mutex);
	list_add_tail(&pe->list, &phb->ioda.pe_list);
	mutex_unlock(&phb->ioda.pe_list_mutex);
	return pe;
}

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}

		/*
		 * In partial hotplug case, the PCI device might be still
		 * associated with the PE and needn't attach it to the PE
		 * again.
		 */
		if (pdn->pe_number != IODA_INVALID_PE)
			continue;

		pe->device_count++;
		pdn->pe_number = pe->pe_number;
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There're 2 types of PCI bus sensitive PEs: One that is comprised of a
 * single PCI bus. Another one that contains the primary PCI bus and its
 * subordinate PCI devices and buses. The second type of PE is normally
 * originated by a PCIe-to-PCI bridge or PLX switch downstream ports.
 */
static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe = NULL;
	unsigned int pe_num;

	/*
	 * In partial hotplug case, the PE instance might be still alive.
	 * We should reuse it instead of allocating a new one.
	 */
	pe_num = phb->ioda.pe_rmap[bus->number << 8];
	if (pe_num != IODA_INVALID_PE) {
		pe = &phb->ioda.pe_array[pe_num];
		pnv_ioda_setup_same_PE(bus, pe);
		return NULL;
	}

	/* PE number for root bus should have been reserved */
	if (pci_is_root_bus(bus) &&
	    phb->ioda.root_pe_idx != IODA_INVALID_PE)
		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];

	/* Check if PE is determined by M64 */
	if (!pe)
		pe = pnv_ioda_pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
	if (!pe)
		pe = pnv_ioda_alloc_pe(phb);

	if (!pe) {
		pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			__func__, pci_domain_nr(bus), bus->number);
		return NULL;
	}

	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;

	if (all)
		pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n",
			&bus->busn_res.start, &bus->busn_res.end,
			pe->pe_number);
	else
		pe_info(pe, "Secondary bus %pad associated with PE#%x\n",
			&bus->busn_res.start, pe->pe_number);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pe->pbus = NULL;
		return NULL;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}

static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
{
	int pe_num, found_pe = false, rc;
	long rid;
	struct pnv_ioda_pe *pe;
	struct pci_dev *gpu_pdev;
	struct pci_dn *npu_pdn;
	struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/*
	 * Intentionally leak a reference on the npu device (for
	 * nvlink only; this is not an opencapi path) to make sure it
	 * never goes away, as it's been the case all along and some
	 * work is needed otherwise.
	 */
	pci_dev_get(npu_pdev);

	/*
	 * Due to a hardware erratum PE#0 on the NPU is reserved for
	 * error handling. This means we only have three PEs remaining
	 * which need to be assigned to four links, implying some
	 * links must share PEs.
	 *
	 * To achieve this we assign PEs such that NPUs linking the
	 * same GPU get assigned the same PE.
	 */
	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
		pe = &phb->ioda.pe_array[pe_num];
		if (!pe->pdev)
			continue;

		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
			/*
			 * This device has the same peer GPU so should
			 * be assigned the same PE as the existing
			 * peer NPU.
			 */
			dev_info(&npu_pdev->dev,
				"Associating to existing PE %x\n", pe_num);
			npu_pdn = pci_get_pdn(npu_pdev);
			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
			npu_pdn->pe_number = pe_num;
			phb->ioda.pe_rmap[rid] = pe->pe_number;
			pe->device_count++;

			/* Map the PE to this link */
			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
				OpalPciBusAll,
				OPAL_COMPARE_RID_DEVICE_NUMBER,
				OPAL_COMPARE_RID_FUNCTION_NUMBER,
				OPAL_MAP_PE);
			WARN_ON(rc != OPAL_SUCCESS);
			found_pe = true;
			break;
		}
	}

	if (!found_pe)
		/*
		 * Could not find an existing PE so allocate a new
		 * one.
		 */
		return pnv_ioda_setup_dev_PE(npu_pdev);
	else
		return pe;
}

static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list)
		pnv_ioda_setup_npu_PE(pdev);
}

static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;
		if (phb->type == PNV_PHB_NPU_NVLINK) {
			/* PE#0 is needed for error reporting */
			pnv_ioda_reserve_pe(phb, 0);
			pnv_ioda_setup_npu_PEs(hose->bus);
			if (phb->model == PNV_PHB_MODEL_NPU2)
				WARN_ON_ONCE(pnv_npu2_init(hose));
		}
	}
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;
		if (phb->type != PNV_PHB_IODA2)
			continue;

		list_for_each_entry(pe, &phb->ioda.pe_list, list)
			pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
	}
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus        *bus;
	struct pci_controller *hose;
	struct pnv_phb        *phb;
	struct pci_dn         *pdn;
	int                    i, j;
	int                    m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < m64_bars; j++) {
			if (pdn->m64_map[j][i] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
			clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
			pdn->m64_map[j][i] = IODA_INVALID_M64;
		}

	kfree(pdn->m64_map);
	return 0;
}

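/*
 * Assign M64 BARs for the VFs: in single mode (pdn->m64_single_mode) one
 * M64 BAR is used per VF for each IOV BAR, otherwise a single shared M64
 * BAR covers the whole IOV BAR.
 */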
static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus        *bus;
	struct pci_controller *hose;
	struct pnv_phb        *phb;
	struct pci_dn         *pdn;
	unsigned int           win;
	struct resource       *res;
	int                    i, j;
	int64_t                rc;
	int                    total_vfs;
	resource_size_t        size, start;
	int                    pe_num;
	int                    m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	pdn->m64_map = kmalloc_array(m64_bars,
				     sizeof(*pdn->m64_map),
				     GFP_KERNEL);
	if (!pdn->m64_map)
		return -ENOMEM;
	/* Initialize the m64_map to IODA_INVALID_M64 */
	for (i = 0; i < m64_bars ; i++)
		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
			pdn->m64_map[i][j] = IODA_INVALID_M64;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		for (j = 0; j < m64_bars; j++) {
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
					phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_map[j][i] = win;

			if (pdn->m64_single_mode) {
				size = pci_iov_resource_size(pdev,
							PCI_IOV_RESOURCES + i);
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (pdn->m64_single_mode) {
				pe_num = pdn->pe_num_map[j];
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_map[j][i], 0);
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						 OPAL_M64_WINDOW_TYPE,
						 pdn->m64_map[j][i],
						 start,
						 0, /* unused */
						 size);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (pdn->m64_single_mode)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);
	return -EBUSY;
}

static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
		int num);

static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl;
	int64_t rc;

	tbl = pe->table_group.tables[0];
	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %lld release DMA window\n", rc);

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		BUG_ON(pe->table_group.group);
	}

	iommu_tce_table_put(tbl);
}

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *pe_n;
	struct pci_dn *pdn;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)
			continue;

		pnv_pci_ioda2_release_dma_pe(pdev, pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(pe);
	}
}

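/*
 * Tear down the SR-IOV state set up by pnv_pci_sriov_enable(): release
 * the VF PEs, undo the IOV BAR shift (shared-BAR mode only), release the
 * M64 windows and free the PE numbers and pe_num_map.
 */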
void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;
	u16 num_vfs, i;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	num_vfs = pdn->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->m64_single_mode)
			pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev, num_vfs);

		/* Release PE numbers */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				if (pdn->pe_num_map[i] == IODA_INVALID_PE)
					continue;

				pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
				pnv_ioda_free_pe(pe);
			}
		} else
			bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
		/* Releasing pe_num_map */
		kfree(pdn->pe_num_map);
	}
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);
#ifdef CONFIG_IOMMU_API
static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
		struct iommu_table_group *table_group, struct pci_bus *bus);

#endif
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	int pe_num;
	u16 vf_index;
	struct pci_dn *pdn;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
		int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
		struct pci_dn *vf_pdn;

		if (pdn->m64_single_mode)
			pe_num = pdn->pe_num_map[vf_index];
		else
			pe_num = *pdn->pe_num_map + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->mve_number = -1;
		pe->rid = (vf_bus << 8) | vf_devfn;

		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
			hose->global_number, pdev->bus->number,
			PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			pnv_ioda_free_pe(pe);
			pe->pdev = NULL;
			continue;
		}

		/* Put PE to the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		/* associate this pe to its pdn */
		list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
			if (vf_pdn->busno == vf_bus &&
			    vf_pdn->devfn == vf_devfn) {
				vf_pdn->pe_number = pe_num;
				break;
			}
		}

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
#ifdef CONFIG_IOMMU_API
		iommu_register_group(&pe->table_group,
				pe->phb->hose->global_number, pe->pe_number);
		pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
#endif
	}
}

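/*
 * Reserve PE numbers and M64 windows for @num_vfs VFs of @pdev, shift the
 * IOV BAR where the shared-BAR layout requires it, then create the VF PEs.
 * Only IODA2 PHBs with expanded (64-bit prefetchable) IOV BARs are handled;
 * everything allocated here is rolled back on failure.
 */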
int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;
	int ret;
	u16 i;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->vfs_expanded) {
			dev_info(&pdev->dev, "don't support this SRIOV device"
				" with non 64bit-prefetchable IOV BAR\n");
			return -ENOSPC;
		}

		/*
		 * When the M64 BARs function in Single PE mode, the number
		 * of VFs that can be enabled must not exceed the number of
		 * M64 BARs.
		 */
		if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
			dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n");
			return -EBUSY;
		}

		/* Allocating pe_num_map */
		if (pdn->m64_single_mode)
			pdn->pe_num_map = kmalloc_array(num_vfs,
							sizeof(*pdn->pe_num_map),
							GFP_KERNEL);
		else
			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);

		if (!pdn->pe_num_map)
			return -ENOMEM;

		if (pdn->m64_single_mode)
			for (i = 0; i < num_vfs; i++)
				pdn->pe_num_map[i] = IODA_INVALID_PE;

		/* Calculate available PE for required VFs */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				pe = pnv_ioda_alloc_pe(phb);
				if (!pe) {
					ret = -EBUSY;
					goto m64_failed;
				}

				pdn->pe_num_map[i] = pe->pe_number;
			}
		} else {
			mutex_lock(&phb->ioda.pe_alloc_mutex);
			*pdn->pe_num_map = bitmap_find_next_zero_area(
				phb->ioda.pe_alloc, phb->ioda.total_pe_num,
				0, num_vfs, 0);
			if (*pdn->pe_num_map >= phb->ioda.total_pe_num) {
				mutex_unlock(&phb->ioda.pe_alloc_mutex);
				dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
				kfree(pdn->pe_num_map);
				return -EBUSY;
			}
			bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
			mutex_unlock(&phb->ioda.pe_alloc_mutex);
		}
		pdn->num_vfs = num_vfs;

		/* Assign M64 window accordingly */
		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
		if (ret) {
			dev_info(&pdev->dev, "Not enough M64 window resources\n");
			goto m64_failed;
		}

		/*
		 * When using one M64 BAR to map one IOV BAR, we need to shift
		 * the IOV BAR according to the PE# allocated to the VFs.
		 * Otherwise, the PE# for the VF will conflict with others.
		 */
		if (!pdn->m64_single_mode) {
			ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
			if (ret)
				goto m64_failed;
		}
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

m64_failed:
	if (pdn->m64_single_mode) {
		for (i = 0; i < num_vfs; i++) {
			if (pdn->pe_num_map[i] == IODA_INVALID_PE)
				continue;

			pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
			pnv_ioda_free_pe(pe);
		}
	} else
		bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);

	/* Releasing pe_num_map */
	kfree(pdn->pe_num_map);

	return ret;
}

int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_sriov_vf_pdns(pdev);
	return 0;
}

int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_sriov_vf_pdns(pdev);

	return pnv_pci_sriov_enable(pdev, num_vfs);
}
#endif /* CONFIG_PCI_IOV */

static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE#
	 * hasn't been assigned. Do nothing for the
	 * case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	/*
	 * Note: iommu_add_device() will fail here as
	 * for physical PE: the device is already added by now;
	 * for virtual PE: sysfs entries are not ready yet and
	 * tce_iommu_bus_notifier will add the device to a group later.
	 */
}

/*
 * Reconfigure TVE#0 to be usable as 64-bit DMA space.
 *
 * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses.
 * Devices can only access more than that if bit 59 of the PCI address is set
 * by hardware, which indicates TVE#1 should be used instead of TVE#0.
 * Many PCI devices are not capable of addressing that many bits, and as a
 * result are limited to the 4GB of virtual memory made available to 32-bit
 * devices in TVE#0.
 *
 * In order to work around this, reconfigure TVE#0 to be suitable for 64-bit
 * devices by making the virtual memory past the first 4GB inaccessible
 * to 64-bit DMAs. This should only be used by devices that want more than
 * 4GB, and only on PEs that have no 32-bit devices.
 *
 * Currently this will only work on PHB3 (POWER8).
 */
static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe)
{
	u64 window_size, table_size, tce_count, addr;
	struct page *table_pages;
	u64 tce_order = 28; /* 256MB TCEs */
	__be64 *tces;
	s64 rc;

	/*
	 * Window size needs to be a power of two, but needs to account for
	 * shifting memory by the 4GB offset required to skip 32bit space.
	 */
	window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32));
	tce_count = window_size >> tce_order;
	table_size = tce_count << 3;

	if (table_size < PAGE_SIZE)
		table_size = PAGE_SIZE;

	table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
				       get_order(table_size));
	if (!table_pages)
		goto err;

	tces = page_address(table_pages);
	if (!tces)
		goto err;

	memset(tces, 0, table_size);

	for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) {
		tces[(addr + (1ULL << 32)) >> tce_order] =
			cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE);
	}

	rc = opal_pci_map_pe_dma_window(pe->phb->opal_id,
					pe->pe_number,
					/* reconfigure window 0 */
					(pe->pe_number << 1) + 0,
					1,
					__pa(tces),
					table_size,
					1 << tce_order);
	if (rc == OPAL_SUCCESS) {
		pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n");
		return 0;
	}
err:
	pe_err(pe, "Error configuring 64-bit DMA bypass\n");
	return -EIO;
}

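/*
 * Decide whether a device can use direct (bypass) DMA addressing: either
 * the PE's TCE bypass window already covers the requested mask, or, on
 * PHB3, TVE#0 can be rewired by pnv_pci_ioda_dma_64bit_bypass() above.
 */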
static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
		u64 dma_mask)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return false;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		if (dma_mask >= top)
			return true;
	}

	/*
	 * If the device can't set the TCE bypass bit but still wants
	 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
	 * bypass the 32-bit region and be usable for 64-bit DMAs.
	 * The device needs to be able to address all of this space.
	 */
	if (dma_mask >> 32 &&
	    dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
	    /* pe->pdev should be set if it's a single device, pe->pbus if not */
	    (pe->device_count == 1 || !pe->pbus) &&
	    phb->model == PNV_PHB_MODEL_PHB3) {
		/* Configure the bypass mode */
		s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe);
		if (rc)
			return false;
		/* 4GB offset bypasses 32-bit space */
		pdev->dev.archdata.dma_offset = (1ULL << 32);
		return true;
	}

	return false;
}

static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
		dev->dev.archdata.dma_offset = pe->tce_bypass_base;

		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}

static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
						     bool real_mode)
{
	return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
		(phb->regs + 0x210);
}

static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl = list_first_entry_or_null(
			&tbl->it_group_list, struct iommu_table_group_link,
			next);
	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
			struct pnv_ioda_pe, table_group);
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;

	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
			npages - 1);

	/* p7ioc-style invalidation, 2 TCEs per write */
	start |= (1ull << 63);
	end |= (1ull << 63);
	inc = 16;
	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq_be(start, invalidate);
		else
			__raw_writeq_be(start, invalidate);

		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);

	return ret;
}

#ifdef CONFIG_IOMMU_API
/* Common for IODA1 and IODA2 */
static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction,
		bool realmode)
{
	return pnv_tce_xchg(tbl, index, hpa, direction, !realmode);
}
#endif

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda1_iommu_ops = {
	.set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API
	.xchg_no_kill = pnv_ioda_tce_xchg_no_kill,
	.tce_kill = pnv_pci_p7ioc_tce_invalidate,
	.useraddrptr = pnv_tce_useraddrptr,
#endif
	.clear = pnv_ioda1_tce_free,
	.get = pnv_tce_get,
};

#define PHB3_TCE_KILL_INVAL_ALL		PPC_BIT(0)
#define PHB3_TCE_KILL_INVAL_PE		PPC_BIT(1)
#define PHB3_TCE_KILL_INVAL_ONE		PPC_BIT(2)

static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
{
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
	const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;

	mb(); /* Ensure previous TCE table stores are visible */
	if (rm)
		__raw_rm_writeq_be(val, invalidate);
	else
		__raw_writeq_be(val, invalidate);
}

static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	/* 01xb - invalidate TCEs that match the specified PE# */
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
	unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);

	mb(); /* Ensure above stores are visible */
	__raw_writeq_be(val, invalidate);
}

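/*
 * Invalidate a range of TCEs for one PE through the PHB3 "TCE kill"
 * register, stepping through the range one IOMMU page at a time.
 */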
static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
					unsigned shift, unsigned long index,
					unsigned long npages)
{
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;

	/* We'll invalidate DMA address in PE scope */
	start = PHB3_TCE_KILL_INVAL_ONE;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	start |= (index << shift);
	end |= ((index + npages - 1) << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq_be(start, invalidate);
		else
			__raw_writeq_be(start, invalidate);
		start += inc;
	}
}

static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
		pnv_pci_phb3_tce_invalidate_pe(pe);
	else
		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
				  pe->pe_number, 0, 0, 0);
}

static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl;

	list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
				struct pnv_ioda_pe, table_group);
		struct pnv_phb *phb = pe->phb;
		unsigned int shift = tbl->it_page_shift;

		/*
		 * NVLink1 can use the TCE kill register directly as
		 * it's the same as PHB3. NVLink2 is different and
		 * should go via the OPAL call.
		 */
		if (phb->model == PNV_PHB_MODEL_NPU) {
			/*
			 * The NVLink hardware does not support TCE kill
			 * per TCE entry so we have to invalidate
			 * the entire cache for it.
			 */
			pnv_pci_phb3_tce_invalidate_entire(phb, rm);
			continue;
		}
		if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
			pnv_pci_phb3_tce_invalidate(pe, rm, shift,
						    index, npages);
		else
			opal_pci_tce_kill(phb->opal_id,
					  OPAL_PCI_TCE_KILL_PAGES,
					  pe->pe_number, 1u << shift,
					  index << shift, npages);
	}
}

void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
{
	if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3)
		pnv_pci_phb3_tce_invalidate_entire(phb, rm);
	else
		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0);
}

static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

	return ret;
}

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
	.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
	.xchg_no_kill = pnv_ioda_tce_xchg_no_kill,
	.tce_kill = pnv_pci_ioda2_tce_invalidate,
	.useraddrptr = pnv_tce_useraddrptr,
#endif
	.clear = pnv_ioda2_tce_free,
	.get = pnv_tce_get,
	.free = pnv_pci_ioda2_table_free_pages,
};

static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
{
	unsigned int *weight = (unsigned int *)data;

	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		*weight += 3;
	else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		*weight += 15;
	else
		*weight += 10;

	return 0;
}

static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
{
	unsigned int weight = 0;

	/* SRIOV VF has same DMA32 weight as its PF */
#ifdef CONFIG_PCI_IOV
	if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
		pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
		return weight;
	}
#endif

	if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
		pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
	} else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
		struct pci_dev *pdev;

		list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
			pnv_pci_ioda_dev_dma_weight(pdev, &weight);
	} else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
		pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
	}

	return weight;
}

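/*
 * Set up the 32-bit DMA window for an IODA1 PE: pick a run of free DMA32
 * segments sized by the PE's relative DMA weight, allocate one contiguous
 * TCE table covering them, program each segment into the PHB and register
 * the resulting iommu_table with the PE.
 */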
static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	struct iommu_table *tbl;
	unsigned int weight, total_weight = 0;
	unsigned int tce32_segsz, base, segs, avail, i;
	int64_t rc;
	void *addr;

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */
	weight = pnv_pci_ioda_pe_dma_weight(pe);
	if (!weight)
		return;

	pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight,
		     &total_weight);
	segs = (weight * phb->ioda.dma32_count) / total_weight;
	if (!segs)
		segs = 1;

	/*
	 * Allocate contiguous DMA32 segments. We begin with the expected
	 * number of segments. With one more attempt, the number of DMA32
	 * segments to be allocated is decreased by one until one segment
	 * is allocated successfully.
	 */
	do {
		for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
			for (avail = 0, i = base; i < base + segs; i++) {
				if (phb->ioda.dma32_segmap[i] ==
				    IODA_INVALID_PE)
					avail++;
			}

			if (avail == segs)
				goto found;
		}
	} while (--segs);

	if (!segs) {
		pe_warn(pe, "No available DMA32 segments\n");
		return;
	}

found:
	tbl = pnv_pci_table_alloc(phb->hose->node);
	if (WARN_ON(!tbl))
		return;

	iommu_register_group(&pe->table_group, phb->hose->global_number,
			pe->pe_number);
	pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);

	/* Grab a 32-bit TCE table */
	pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
		weight, total_weight, base, segs);
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		base * PNV_IODA1_DMA32_SEGSIZE,
		(base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 *
	 * Each TCE page is 4KB in size and each TCE entry occupies 8
	 * bytes
	 */
	tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce32_segsz * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce32_segsz * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + tce32_segsz * i,
						tce32_segsz, IOMMU_PAGE_SIZE_4K);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table, err %lld\n",
			       rc);
			goto fail;
		}
	}

	/* Setup DMA32 segment mapping */
	for (i = base; i < base + segs; i++)
		phb->ioda.dma32_segmap[i] = pe->pe_number;

	/* Setup linux iommu table */
	pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
				  base * PNV_IODA1_DMA32_SEGSIZE,
				  IOMMU_PAGE_SHIFT_4K);

	tbl->it_ops = &pnv_ioda1_iommu_ops;
	pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
	pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
	iommu_init_table(tbl, phb->hose->node, 0, 0);

	if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce32_segsz * segs));
	if (tbl) {
		pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
		iommu_tce_table_put(tbl);
	}
}

static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = pe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
			tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n",
		num, start_addr, start_addr + win_size - 1,
		IOMMU_PAGE_SIZE(tbl));

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bits DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			pe->pe_number,
			(pe->pe_number << 1) + num,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(pe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}

	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &pe->table_group);
	pnv_pci_ioda2_tce_invalidate_pe(pe);

	return 0;
}

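/*
 * Enable or disable the PE's second TVE (window 1) as a direct 1:1 bypass
 * window covering all of system memory, rounded up to a power of two.
 */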
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
	uint16_t window_id = (pe->pe_number << 1) + 1;
	int64_t rc;

	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
	if (enable) {
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     top);
	} else {
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     0);
	}
	if (rc)
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	else
		pe->tce_bypass_enabled = enable;
}

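/*
 * Allocate an IODA2 TCE table for window @num of this table group. Window 0
 * starts at the group's 32-bit DMA base and window 1 at the bypass base;
 * the table pages (and optionally a userspace view of them) are allocated
 * by pnv_pci_ioda2_table_alloc_pages().
 */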
static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
|
|
|
|
int num, __u32 page_shift, __u64 window_size, __u32 levels,
|
2018-07-04 14:13:47 +08:00
|
|
|
bool alloc_userspace_copy, struct iommu_table **ptbl)
|
2015-06-05 14:35:20 +08:00
|
|
|
{
|
|
|
|
struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
|
|
|
|
table_group);
|
|
|
|
int nid = pe->phb->hose->node;
|
|
|
|
__u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
|
|
|
|
long ret;
|
|
|
|
struct iommu_table *tbl;
|
|
|
|
|
|
|
|
tbl = pnv_pci_table_alloc(nid);
|
|
|
|
if (!tbl)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-03-22 12:21:49 +08:00
|
|
|
tbl->it_ops = &pnv_ioda2_iommu_ops;
|
|
|
|
|
2015-06-05 14:35:20 +08:00
|
|
|
ret = pnv_pci_ioda2_table_alloc_pages(nid,
|
|
|
|
bus_offset, page_shift, window_size,
|
2018-07-04 14:13:47 +08:00
|
|
|
levels, alloc_userspace_copy, tbl);
|
2015-06-05 14:35:20 +08:00
|
|
|
if (ret) {
|
2017-03-22 12:21:50 +08:00
|
|
|
iommu_tce_table_put(tbl);
|
2015-06-05 14:35:20 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
*ptbl = tbl;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = NULL;
	long rc;
	unsigned long res_start, res_end;

	/*
	 * crashkernel= specifies the kdump kernel's maximum memory at
	 * some offset and there is no guarantee the result is a power
	 * of 2, which will cause errors later.
	 */
	const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());

	/*
	 * In memory constrained environments, e.g. kdump kernel, the
	 * DMA window can be larger than available memory, which will
	 * cause errors later.
	 */
	const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER - 1);

	/*
	 * We create the default window as big as we can. The constraint is
	 * the max order of allocation possible. The TCE table is likely to
	 * end up being multilevel and with on-demand allocation in place,
	 * the initial use is not going to be huge as the default window aims
	 * to support crippled devices (i.e. not fully 64bit DMAble) only.
	 */
	/* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */
	const u64 window_size = min((maxblock * 8) << PAGE_SHIFT, max_memory);
	/* Each TCE level cannot exceed maxblock so go multilevel if needed */
	unsigned long tces_order = ilog2(window_size >> PAGE_SHIFT);
	unsigned long tcelevel_order = ilog2(maxblock >> 3);
	unsigned int levels = tces_order / tcelevel_order;

	if (tces_order % tcelevel_order)
		levels += 1;
	/*
	 * We try to stick to default levels (which is >1 at the moment) in
	 * order to save memory by relying on on-demand TCE level allocation.
	 */
	levels = max_t(unsigned int, levels, POWERNV_IOMMU_DEFAULT_LEVELS);

	rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT,
			window_size, levels, false, &tbl);
	if (rc) {
		pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
				rc);
		return rc;
	}

	/* We use top part of 32bit space for MMIO so exclude it from DMA */
	res_start = 0;
	res_end = 0;
	if (window_size > pe->phb->ioda.m32_pci_base) {
		res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
		res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
	}
	iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end);

	rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
				rc);
		iommu_tce_table_put(tbl);
		return rc;
	}

	if (!pnv_iommu_bypass_disabled)
		pnv_pci_ioda2_set_bypass(pe, true);

	/*
	 * Set table base for the case of IOMMU DMA use. Usually this is done
	 * from dma_dev_setup() which is not called when a device is returned
	 * from VFIO so do it here.
	 */
	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);

	return 0;
}

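/*
 * A rough worked example of the sizing logic above (actual values depend on
 * the kernel configuration, assumed here for illustration): with 4K pages
 * (PAGE_SHIFT = 12) and MAX_ORDER = 11, maxblock = 1 << (12 + 10) = 4MB, so
 * the single-level limit is (maxblock * 8) << PAGE_SHIFT = 128GB of DMA
 * space. On a 16GB machine the window is clamped to 16GB;
 * tces_order = ilog2(16GB >> 12) = 22, tcelevel_order = ilog2(4MB >> 3) = 19,
 * giving 22 / 19 = 1 with a remainder, hence 2 levels, before
 * POWERNV_IOMMU_DEFAULT_LEVELS is enforced as a minimum.
 */
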
#if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
		int num)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = pe->phb;
	long ret;

	pe_info(pe, "Removing DMA window #%d\n", num);

	ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
			(pe->pe_number << 1) + num,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (ret)
		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
	else
		pnv_pci_ioda2_tce_invalidate_pe(pe);

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);

	return ret;
}
#endif

#ifdef CONFIG_IOMMU_API
unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
		__u64 window_size, __u32 levels)
{
	unsigned long bytes = 0;
	const unsigned window_shift = ilog2(window_size);
	unsigned entries_shift = window_shift - page_shift;
	unsigned table_shift = entries_shift + 3;
	unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
	unsigned long direct_table_size;

	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
			!is_power_of_2(window_size))
		return 0;

	/* Calculate a direct table size from window_size and levels */
	entries_shift = (entries_shift + levels - 1) / levels;
	table_shift = entries_shift + 3;
	table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
	direct_table_size = 1UL << table_shift;

	for ( ; levels; --levels) {
		bytes += _ALIGN_UP(tce_table_size, direct_table_size);

		tce_table_size /= direct_table_size;
		tce_table_size <<= 3;
		tce_table_size = max_t(unsigned long,
				tce_table_size, direct_table_size);
	}

	return bytes + bytes; /* one for HW table, one for userspace copy */
}

static long pnv_pci_ioda2_create_table_userspace(
		struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
{
	long ret = pnv_pci_ioda2_create_table(table_group,
			num, page_shift, window_size, levels, true, ptbl);

	if (!ret)
		(*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
				page_shift, window_size, levels);
	return ret;
}

static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
						table_group);
	/* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
	struct iommu_table *tbl = pe->table_group.tables[0];

	pnv_pci_ioda2_set_bypass(pe, false);
	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (pe->pbus)
		pnv_ioda_setup_bus_dma(pe, pe->pbus);
	else if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, NULL);
	iommu_tce_table_put(tbl);
}

static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
						table_group);

	pnv_pci_ioda2_setup_default_config(pe);
	if (pe->pbus)
		pnv_ioda_setup_bus_dma(pe, pe->pbus);
}

static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_pci_ioda2_create_table_userspace,
	.set_window = pnv_pci_ioda2_set_window,
	.unset_window = pnv_pci_ioda2_unset_window,
	.take_ownership = pnv_ioda2_take_ownership,
	.release_ownership = pnv_ioda2_release_ownership,
};

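/*
 * These callbacks are driven from the IOMMU/VFIO side: when userspace takes
 * over a PE, take_ownership() removes the kernel's default window and
 * bypass mapping, create_table()/set_window() then build the
 * userspace-controlled windows, and release_ownership() restores the
 * default DMA configuration when the group is handed back to the kernel.
 */
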
static void pnv_ioda_setup_bus_iommu_group_add_devices(struct pnv_ioda_pe *pe,
		struct iommu_table_group *table_group,
		struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		iommu_add_device(table_group, &dev->dev);

		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_bus_iommu_group_add_devices(pe,
					table_group, dev->subordinate);
	}
}

static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
		struct iommu_table_group *table_group, struct pci_bus *bus)
{
	if (pe->flags & PNV_IODA_PE_DEV)
		iommu_add_device(table_group, &pe->pdev->dev);

	if ((pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) || bus)
		pnv_ioda_setup_bus_iommu_group_add_devices(pe, table_group,
				bus);
}

static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb);

static void pnv_pci_ioda_setup_iommu_api(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	/*
	 * There are 4 types of PEs:
	 * - PNV_IODA_PE_BUS: a downstream port with an adapter,
	 *   created from pnv_pci_setup_bridge();
	 * - PNV_IODA_PE_BUS_ALL: a PCI-PCIX bridge with devices behind it,
	 *   created from pnv_pci_setup_bridge();
	 * - PNV_IODA_PE_VF: a SRIOV virtual function,
	 *   created from pnv_pcibios_sriov_enable();
	 * - PNV_IODA_PE_DEV: an NPU or OCAPI device,
	 *   created from pnv_pci_ioda_fixup().
	 *
	 * Normally a PE is represented by an IOMMU group, however for
	 * devices with side channels the groups need to be more strict.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->type == PNV_PHB_NPU_NVLINK ||
		    phb->type == PNV_PHB_NPU_OCAPI)
			continue;

		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			struct iommu_table_group *table_group;

			table_group = pnv_try_setup_npu_table_group(pe);
			if (!table_group) {
				if (!pnv_pci_ioda_pe_dma_weight(pe))
					continue;

				table_group = &pe->table_group;
				iommu_register_group(&pe->table_group,
						pe->phb->hose->global_number,
						pe->pe_number);
			}
			pnv_ioda_setup_bus_iommu_group(pe, table_group,
					pe->pbus);
		}
	}

	/*
	 * Now we have all PHBs discovered, time to add NPU devices to
	 * the corresponding IOMMU groups.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		unsigned long pgsizes;

		phb = hose->private_data;

		if (phb->type != PNV_PHB_NPU_NVLINK)
			continue;

		pgsizes = pnv_ioda_parse_tce_sizes(phb);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			/*
			 * IODA2 bridges get this set up from
			 * pci_controller_ops::setup_bridge but NPU bridges
			 * do not have this hook defined so we do it here.
			 */
			pe->table_group.pgsizes = pgsizes;
			pnv_npu_compound_attach(pe);
		}
	}
}
#else /* !CONFIG_IOMMU_API */
static void pnv_pci_ioda_setup_iommu_api(void) { };
#endif

static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	unsigned long mask = 0;
	int i, rc, count;
	u32 val;

	count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes");
	if (count <= 0) {
		mask = SZ_4K | SZ_64K;
		/* Add 16M for POWER8 by default */
		if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				!cpu_has_feature(CPU_FTR_ARCH_300))
			mask |= SZ_16M | SZ_256M;
		return mask;
	}

	for (i = 0; i < count; i++) {
		rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes",
						i, &val);
		if (rc == 0)
			mask |= 1ULL << val;
	}

	return mask;
}

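/*
 * The returned mask is a bitmap of supported IOMMU page sizes expressed in
 * bytes, built from the page shifts in the device tree: e.g. an
 * "ibm,supported-tce-sizes" property of <12 16 24> yields
 * SZ_4K | SZ_64K | SZ_16M.
 */
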
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	int64_t rc;

	if (!pnv_pci_ioda_pe_dma_weight(pe))
		return;

	/* TVE #1 is selected by PCI address bit 59 */
	pe->tce_bypass_base = 1ull << 59;

	/* The PE will reserve all possible 32-bits space */
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		phb->ioda.m32_pci_base);

	/* Setup linux iommu table */
	pe->table_group.tce32_start = 0;
	pe->table_group.tce32_size = phb->ioda.m32_pci_base;
	pe->table_group.max_dynamic_windows_supported =
			IOMMU_TABLE_GROUP_MAX_TABLES;
	pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
	pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);
#ifdef CONFIG_IOMMU_API
	pe->table_group.ops = &pnv_pci_ioda2_ops;
#endif

	rc = pnv_pci_ioda2_setup_default_config(pe);
	if (rc)
		return;

	if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		pnv_ioda_setup_bus_dma(pe, pe->pbus);
}

int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
{
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);

	return opal_pci_msi_eoi(phb->opal_id, hw_irq);
}

static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	int64_t rc;
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);

	rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}

void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
{
	struct irq_data *idata;
	struct irq_chip *ichip;

	/* The MSI EOI OPAL call is only needed on PHB3 */
	if (phb->model != PNV_PHB_MODEL_PHB3)
		return;

	if (!phb->ioda.irq_chip_init) {
		/*
		 * First time we setup an MSI IRQ, we need to setup the
		 * corresponding IRQ chip to route correctly.
		 */
		idata = irq_get_irq_data(virq);
		ichip = irq_data_get_irq_chip(idata);
		phb->ioda.irq_chip_init = 1;
		phb->ioda.irq_chip = *ichip;
		phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
	}
	irq_set_chip(virq, &phb->ioda.irq_chip);
}

/*
 * Returns true iff chip is something that we could call
 * pnv_opal_pci_msi_eoi for.
 */
bool is_pnv_opal_msi(struct irq_chip *chip)
{
	return chip->irq_eoi == pnv_ioda2_msi_eoi;
}
EXPORT_SYMBOL_GPL(is_pnv_opal_msi);

static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	unsigned int xive_num = hwirq - phb->msi_base;
	__be32 data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (dev->no_64bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		__be64 addr64;

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = be64_to_cpu(addr64) >> 32;
		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
	} else {
		__be32 addr32;

		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = be32_to_cpu(addr32);
	}
	msg->data = be32_to_cpu(data);

	pnv_set_msi_irq_chip(phb, virq);

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %x\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}

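/*
 * Note on xive_num above: hardware IRQ numbers for MSIs are offset by
 * phb->msi_base, so the subtraction gives the zero-based index into this
 * PHB's MSI range (e.g. msi_base 0x800 and hwirq 0x805 would give XIVE #5;
 * the numbers here are illustrative only).
 */
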
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}

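/*
 * The "ibm,opal-msi-ranges" property is read above as a (base, count) pair:
 * cell 0 is the first hardware IRQ number reserved for MSIs on this PHB and
 * cell 1 is how many MSIs are available, which sizes the bitmap.
 */
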
#ifdef CONFIG_PCI_IOV
static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	const resource_size_t gate = phb->ioda.m64_segsize >> 2;
	struct resource *res;
	int i;
	resource_size_t size, total_vf_bar_sz;
	struct pci_dn *pdn;
	int mul, total_vfs;

	pdn = pci_get_pdn(pdev);
	pdn->vfs_expanded = 0;
	pdn->m64_single_mode = false;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	mul = phb->ioda.total_pe_num;
	total_vf_bar_sz = 0;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;
		if (!pnv_pci_is_m64_flags(res->flags)) {
			dev_warn(&pdev->dev, "Don't support SR-IOV with"
					" non M64 VF BAR%d: %pR. \n",
				 i, res);
			goto truncate_iov;
		}

		total_vf_bar_sz += pci_iov_resource_size(pdev,
				i + PCI_IOV_RESOURCES);

		/*
		 * If bigger than quarter of M64 segment size, just round up
		 * to a power of two.
		 *
		 * Generally, one M64 BAR maps one IOV BAR. To avoid conflict
		 * with other devices, IOV BAR size is expanded to be
		 * (total_pe * VF_BAR_size). When VF_BAR_size is half of M64
		 * segment size, the expanded size would be equal to half of
		 * the whole M64 space size, which will exhaust the M64 space
		 * and limit the system flexibility. This is a design decision
		 * to set the boundary to quarter of the M64 segment size.
		 */
		if (total_vf_bar_sz > gate) {
			mul = roundup_pow_of_two(total_vfs);
			dev_info(&pdev->dev,
				"VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n",
				total_vf_bar_sz, gate, mul);
			pdn->m64_single_mode = true;
			break;
		}
	}

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;

		size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
		/*
		 * On PHB3, the minimum size alignment of M64 BAR in single
		 * mode is 32MB.
		 */
		if (pdn->m64_single_mode && (size < SZ_32M))
			goto truncate_iov;
		dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
		res->end = res->start + size * mul - 1;
		dev_dbg(&pdev->dev, "                       %pR\n", res);
		dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
			 i, res, mul);
	}
	pdn->vfs_expanded = mul;

	return;

truncate_iov:
	/* To save MMIO space, IOV BAR is truncated. */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		res->flags = 0;
		res->end = res->start - 1;
	}
}

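/*
 * Illustration of the "gate" check above (numbers are an example only):
 * with a 256MB M64 segment size the gate is 64MB, so a PF whose VF BARs
 * total more than 64MB is switched to single PE mode and its IOV BARs are
 * expanded by roundup_pow_of_two(total_vfs) instead of total_pe_num.
 */
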
static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
{
	if (WARN_ON(pci_dev_is_added(pdev)))
		return;

	if (pdev->is_virtfn) {
		struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);

		/*
		 * VF PEs are single-device PEs so their pdev pointer needs to
		 * be set. The pdev doesn't exist when the PE is allocated
		 * (in pcibios_sriov_enable()) so we fix it up here.
		 */
		pe->pdev = pdev;
		WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
	} else if (pdev->is_physfn) {
		/*
		 * For PFs adjust their allocated IOV resources to match what
		 * the PHB can support using its M64 BAR table.
		 */
		pnv_pci_ioda_fixup_iov_resources(pdev);
	}
}
#endif /* CONFIG_PCI_IOV */

static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
				  struct resource *res)
{
	struct pnv_phb *phb = pe->phb;
	struct pci_bus_region region;
	int index;
	int64_t rc;

	if (!res || !res->flags || res->start > res->end)
		return;

	if (res->flags & IORESOURCE_IO) {
		region.start = res->start - phb->ioda.io_pci_base;
		region.end   = res->end - phb->ioda.io_pci_base;
		index = region.start / phb->ioda.io_segsize;

		while (index < phb->ioda.total_pe_num &&
		       region.start <= region.end) {
			phb->ioda.io_segmap[index] = pe->pe_number;
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
			if (rc != OPAL_SUCCESS) {
				pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
				       __func__, rc, index, pe->pe_number);
				break;
			}

			region.start += phb->ioda.io_segsize;
			index++;
		}
	} else if ((res->flags & IORESOURCE_MEM) &&
		   !pnv_pci_is_m64(phb, res)) {
		region.start = res->start -
			       phb->hose->mem_offset[0] -
			       phb->ioda.m32_pci_base;
		region.end   = res->end -
			       phb->hose->mem_offset[0] -
			       phb->ioda.m32_pci_base;
		index = region.start / phb->ioda.m32_segsize;

		while (index < phb->ioda.total_pe_num &&
		       region.start <= region.end) {
			phb->ioda.m32_segmap[index] = pe->pe_number;
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
			if (rc != OPAL_SUCCESS) {
				pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
				       __func__, rc, index, pe->pe_number);
				break;
			}

			region.start += phb->ioda.m32_segsize;
			index++;
		}
	}
}

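/*
 * Example of the segment mapping above: assuming a 64KB I/O segment size, a
 * resource starting 0x20000 bytes into the PHB's I/O space begins at
 * segment index 2, and each following segment up to the resource end is
 * pointed at this PE via OPAL_IO_WINDOW_TYPE; M32 resources are handled the
 * same way using m32_segsize.
 */
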
/*
 * This function is supposed to be called on basis of PE from top
 * to bottom style. So the I/O or MMIO segment assigned to
 * parent PE could be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pci_dev *pdev;
	int i;

	/*
	 * NOTE: We only care about PCI bus based PEs for now. For PCI
	 * device based PEs, for example SRIOV sensitive VFs, this should
	 * be figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);

		/*
		 * If the PE contains all subordinate PCI buses, the
		 * windows of the child bridges should be mapped to
		 * the PE as well.
		 */
		if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
			continue;
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
			pnv_ioda_setup_pe_res(pe,
				&pdev->resource[PCI_BRIDGE_RESOURCES + i]);
	}
}

#ifdef CONFIG_DEBUG_FS
static int pnv_pci_diag_data_set(void *data, u64 val)
{
	struct pnv_phb *phb = data;
	s64 ret;

	/* Retrieve the diag data from firmware */
	ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					  phb->diag_data_size);
	if (ret != OPAL_SUCCESS)
		return -EIO;

	/* Print the diag data to the kernel log */
	pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set,
			 "%llu\n");

static int pnv_pci_ioda_pe_dump(void *data, u64 val)
{
	struct pnv_phb *phb = data;
	int pe_num;

	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
		struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num];

		if (!test_bit(pe_num, phb->ioda.pe_alloc))
			continue;

		pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n",
			pe->rid, pe->device_count,
			(pe->flags & PNV_IODA_PE_DEV) ? "dev " : "",
			(pe->flags & PNV_IODA_PE_BUS) ? "bus " : "",
			(pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "",
			(pe->flags & PNV_IODA_PE_MASTER) ? "master " : "",
			(pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "",
			(pe->flags & PNV_IODA_PE_VF) ? "vf " : "");
	}

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_ioda_pe_dump_fops, NULL,
			 pnv_pci_ioda_pe_dump, "%llu\n");

#endif /* CONFIG_DEBUG_FS */

static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* Notify initialization of PHB done */
		phb->initialized = 1;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs) {
			pr_warn("%s: Error on creating debugfs on PHB#%x\n",
				__func__, hose->global_number);
			continue;
		}

		debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
					   phb, &pnv_pci_diag_data_fops);
		debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs,
					   phb, &pnv_pci_ioda_pe_dump_fops);
	}
#endif /* CONFIG_DEBUG_FS */
}

static void pnv_pci_enable_bridge(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct pci_bus *child;

	/* Empty bus ? bail */
	if (list_empty(&bus->devices))
		return;

	/*
	 * If there's a bridge associated with that bus enable it. This works
	 * around races in the generic code if the enabling is done during
	 * parallel probing. This can be removed once those races have been
	 * fixed.
	 */
	if (dev) {
		int rc = pci_enable_device(dev);
		if (rc)
			pci_err(dev, "Error enabling bridge (%d)\n", rc);
		pci_set_master(dev);
	}

	/* Perform the same to child busses */
	list_for_each_entry(child, &bus->children, node)
		pnv_pci_enable_bridge(child);
}

static void pnv_pci_enable_bridges(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		pnv_pci_enable_bridge(hose->bus);
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_iommu_api();
	pnv_pci_ioda_create_dbgfs();

	pnv_pci_enable_bridges();

#ifdef CONFIG_EEH
	pnv_eeh_post_init();
#endif
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. For that case, we
 * needn't enlarge the alignment so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/*
	 * We fall back to M32 if M64 isn't supported. We enforce the M64
	 * alignment for any 64-bit resource, PCIe doesn't care and
	 * bridges only do 64-bit prefetchable anyway.
	 */
	if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
		return phb->ioda.m64_segsize;
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}

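/*
 * In practice this means a 64-bit prefetchable bridge window is aligned to
 * the M64 segment size and a 32-bit memory window to the M32 segment size,
 * so each child bus can land in its own PE segment; buses below a second
 * conventional PCI bridge get no special alignment (return 1).
 */
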
/*
 * We are updating root port or the upstream port of the
 * bridge behind the root port with PHB's windows in order
 * to accommodate the changes on required resources during
 * PCI (slot) hotplug, which is connected to either root
 * port or the downstream ports of PCIe switch behind the
 * root port.
 */
static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
					   unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dev *bridge = bus->self;
	struct resource *r, *w;
	bool msi_region = false;
	int i;

	/* Check if we need to apply the fixup to the bridge's windows */
	if (!pci_is_root_bus(bridge->bus) &&
	    !pci_is_root_bus(bridge->bus->self->bus))
		return;

	/* Fixup the resources */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
		if (!r->flags || !r->parent)
			continue;

		w = NULL;
		if (r->flags & type & IORESOURCE_IO)
			w = &hose->io_resource;
		else if (pnv_pci_is_m64(phb, r) &&
			 (type & IORESOURCE_PREFETCH) &&
			 phb->ioda.m64_segsize)
			w = &hose->mem_resources[1];
		else if (r->flags & type & IORESOURCE_MEM) {
			w = &hose->mem_resources[0];
			msi_region = true;
		}

		r->start = w->start;
		r->end = w->end;

		/* The 64KB 32-bits MSI region shouldn't be included in
		 * the 32-bits bridge window. Otherwise, we can see strange
		 * issues. One of them is EEH error observed on Garrison.
		 *
		 * Exclude top 1MB region which is the minimal alignment of
		 * 32-bits bridge window.
		 */
		if (msi_region) {
			r->end += 0x10000;
			r->end -= 0x100000;
		}
	}
}

static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dev *bridge = bus->self;
	struct pnv_ioda_pe *pe;
	bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);

	/* Extend bridge's windows if necessary */
	pnv_pci_fixup_bridge_resources(bus, type);

	/* The PE for root bus should be realized before any one else */
	if (!phb->ioda.root_pe_populated) {
		pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
		if (pe) {
			phb->ioda.root_pe_idx = pe->pe_number;
			phb->ioda.root_pe_populated = true;
		}
	}

	/* Don't assign PE to PCI bus, which doesn't have subordinate devices */
	if (list_empty(&bus->devices))
		return;

	/* Reserve PEs according to used M64 resources */
	pnv_ioda_reserve_m64_pe(bus, NULL, all);

	/*
	 * Assign PE. We might run here because of partial hotplug.
	 * For the case, we just pick up the existing PE and should
	 * not allocate resources again.
	 */
	pe = pnv_ioda_setup_bus_PE(bus, all);
	if (!pe)
		return;

	pnv_ioda_setup_pe_seg(pe);
	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_setup_dma_pe(phb, pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_setup_dma_pe(phb, pe);
		break;
	default:
		pr_warn("%s: No DMA for PHB#%x (type %d)\n",
			__func__, phb->hose->global_number, phb->type);
	}
}

static resource_size_t pnv_pci_default_alignment(void)
{
	return PAGE_SIZE;
}

#ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
						      int resno)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	resource_size_t align;

	/*
	 * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the
	 * SR-IOV. While from hardware perspective, the range mapped by M64
	 * BAR should be size aligned.
	 *
	 * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra
	 * powernv-specific hardware restriction is gone. But if we just use
	 * the VF BAR size as the alignment, PF BAR / VF BAR may be allocated
	 * within one segment of M64 #15, which introduces a PE conflict
	 * between PF and VF. Based on this, the minimum alignment of an IOV
	 * BAR is m64_segsize.
	 *
	 * This function returns the total IOV BAR size if M64 BAR is in
	 * Shared PE mode or just VF BAR size if not.
	 * If the M64 BAR is in Single PE mode, return the VF BAR size or
	 * M64 segment size if IOV BAR size is less.
	 */
	align = pci_iov_resource_size(pdev, resno);
	if (!pdn->vfs_expanded)
		return align;
	if (pdn->m64_single_mode)
		return max(align, (resource_size_t)phb->ioda.m64_segsize);

	return pdn->vfs_expanded * align;
}
#endif /* CONFIG_PCI_IOV */

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet. For example, resource reassignment
	 * during PCI probe period. We just skip the check if the
	 * PEs aren't ready.
	 */
	if (!phb->initialized)
		return true;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return false;

	return true;
}

static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *pe;

	if (!phb->initialized)
		return true;

	pdn = pci_get_pdn(dev);
	if (!pdn)
		return false;

	if (pdn->pe_number == IODA_INVALID_PE) {
		pe = pnv_ioda_setup_dev_PE(dev);
		if (!pe)
			return false;
	}
	return true;
}

2016-05-20 14:41:35 +08:00
|
|
|
static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
|
|
|
|
int num)
|
|
|
|
{
|
|
|
|
struct pnv_ioda_pe *pe = container_of(table_group,
|
|
|
|
struct pnv_ioda_pe, table_group);
|
|
|
|
struct pnv_phb *phb = pe->phb;
|
|
|
|
unsigned int idx;
|
|
|
|
long rc;
|
|
|
|
|
|
|
|
pe_info(pe, "Removing DMA window #%d\n", num);
|
|
|
|
for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
|
|
|
|
if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
|
|
|
|
idx, 0, 0ul, 0ul, 0ul);
|
|
|
|
if (rc != OPAL_SUCCESS) {
|
|
|
|
pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
|
|
|
|
rc, idx);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
|
|
|
|
}
|
|
|
|
|
|
|
|
pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
|
|
|
|
return OPAL_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
|
|
|
|
{
|
|
|
|
unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
|
|
|
|
struct iommu_table *tbl = pe->table_group.tables[0];
|
|
|
|
int64_t rc;
|
|
|
|
|
|
|
|
if (!weight)
|
|
|
|
return;
|
|
|
|
|
|
|
|
rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
|
|
|
|
if (rc != OPAL_SUCCESS)
|
|
|
|
return;
|
|
|
|
|
2016-07-08 14:37:12 +08:00
|
|
|
pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
|
2016-05-20 14:41:35 +08:00
|
|
|
if (pe->table_group.group) {
|
|
|
|
iommu_group_put(pe->table_group.group);
|
|
|
|
WARN_ON(pe->table_group.group);
|
|
|
|
}
|
|
|
|
|
|
|
|
free_pages(tbl->it_base, get_order(tbl->it_size << 3));
|
2017-03-22 12:21:50 +08:00
|
|
|
iommu_tce_table_put(tbl);
|
2016-05-20 14:41:35 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
|
|
|
|
{
|
|
|
|
struct iommu_table *tbl = pe->table_group.tables[0];
|
|
|
|
unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
|
|
|
|
#ifdef CONFIG_IOMMU_API
|
|
|
|
int64_t rc;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (!weight)
|
|
|
|
return;
|
|
|
|
|
|
|
|
#ifdef CONFIG_IOMMU_API
|
|
|
|
rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
|
|
|
|
if (rc)
|
2017-03-30 18:19:25 +08:00
|
|
|
pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
|
2016-05-20 14:41:35 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
pnv_pci_ioda2_set_bypass(pe, false);
|
|
|
|
if (pe->table_group.group) {
|
|
|
|
iommu_group_put(pe->table_group.group);
|
|
|
|
WARN_ON(pe->table_group.group);
|
|
|
|
}
|
|
|
|
|
2017-03-22 12:21:50 +08:00
|
|
|
iommu_tce_table_put(tbl);
|
2016-05-20 14:41:35 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
|
|
|
|
unsigned short win,
|
|
|
|
unsigned int *map)
|
|
|
|
{
|
|
|
|
struct pnv_phb *phb = pe->phb;
|
|
|
|
int idx;
|
|
|
|
int64_t rc;
|
|
|
|
|
|
|
|
for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
|
|
|
|
if (map[idx] != pe->pe_number)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (win == OPAL_M64_WINDOW_TYPE)
|
|
|
|
rc = opal_pci_map_pe_mmio_window(phb->opal_id,
|
|
|
|
phb->ioda.reserved_pe_idx, win,
|
|
|
|
idx / PNV_IODA1_M64_SEGS,
|
|
|
|
idx % PNV_IODA1_M64_SEGS);
|
|
|
|
else
|
|
|
|
rc = opal_pci_map_pe_mmio_window(phb->opal_id,
|
|
|
|
phb->ioda.reserved_pe_idx, win, 0, idx);
|
|
|
|
|
|
|
|
if (rc != OPAL_SUCCESS)
|
2017-03-30 18:19:25 +08:00
|
|
|
pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n",
|
2016-05-20 14:41:35 +08:00
|
|
|
rc, win, idx);
|
|
|
|
|
|
|
|
map[idx] = IODA_INVALID_PE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1) {
		pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
				     phb->ioda.io_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
				     phb->ioda.m64_segmap);
	} else if (phb->type == PNV_PHB_IODA2) {
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
	}
}

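/*
 * Fully release a PE: detach it from the PHB's PE list, tear down its DMA
 * windows and MMIO segments, deconfigure it in OPAL and free the PE number
 * (unless it is the reserved root-bus PE, which is only marked unpopulated).
 */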
static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	struct pnv_ioda_pe *slave, *tmp;

	mutex_lock(&phb->ioda.pe_list_mutex);
	list_del(&pe->list);
	mutex_unlock(&phb->ioda.pe_list_mutex);

	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_release_pe_dma(pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_release_pe_dma(pe);
		break;
	case PNV_PHB_NPU_OCAPI:
		break;
	default:
		WARN_ON(1);
	}

	pnv_ioda_release_pe_seg(pe);
	pnv_ioda_deconfigure_pe(pe->phb, pe);

	/* Release slave PEs in the compound PE */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
			list_del(&slave->list);
			pnv_ioda_free_pe(slave);
		}
	}

	/*
	 * The PE for root bus can be removed because of hotplug in EEH
	 * recovery for fenced PHB error. We need to mark the PE dead so
	 * that it can be populated again in PCI hot add path. The PE
	 * shouldn't be destroyed as it's the global reserved resource.
	 */
	if (phb->ioda.root_pe_populated &&
	    phb->ioda.root_pe_idx == pe->pe_number)
		phb->ioda.root_pe_populated = false;
	else
		pnv_ioda_free_pe(pe);
}

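/*
 * Called when a PCI device goes away. Drop its reference on the owning PE
 * and release the PE once no devices remain; virtual functions are skipped
 * here.
 */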
static void pnv_pci_release_device(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	if (pdev->is_virtfn)
		return;

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	/*
	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
	 * isn't removed and added afterwards in this scenario. We should
	 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
	 * device count is decreased on removing devices while failing to
	 * be increased on adding devices. It leads to unbalanced PE's device
	 * count and eventually make normal PCI hotplug path broken.
	 */
	pe = &phb->ioda.pe_array[pdn->pe_number];
	pdn->pe_number = IODA_INVALID_PE;

	WARN_ON(--pe->device_count < 0);
	if (pe->device_count == 0)
		pnv_ioda_release_pe(pe);
}

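/* Hot-reset the device's EEH PE when an NPU (NVLink) device is disabled. */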
static void pnv_npu_disable_device(struct pci_dev *pdev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
	struct eeh_pe *eehpe = edev ? edev->pe : NULL;

	if (eehpe && eeh_ops && eeh_ops->reset)
		eeh_ops->reset(eehpe, EEH_RESET_HOT);
}

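/* Shutdown hook: ask OPAL to reset the PHB's IODA tables to a clean state. */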
static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
		       OPAL_ASSERT_RESET);
}

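/*
 * When a bus is (re)scanned, re-attach the matching bus-type PE's pbus
 * pointer to the new struct pci_bus.
 */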
static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}

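/* PCI controller callbacks for standard IODA1/IODA2 PHBs. */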
static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_ioda_dma_dev_setup,
	.dma_bus_setup		= pnv_pci_ioda_dma_bus_setup,
	.iommu_bypass_supported	= pnv_pci_ioda_iommu_bypass_supported,
	.setup_msi_irqs		= pnv_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
	.enable_device_hook	= pnv_pci_enable_device_hook,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.setup_bridge		= pnv_pci_setup_bridge,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.shutdown		= pnv_pci_ioda_shutdown,
};

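/* Controller callbacks for NVLink NPU PHBs. */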
static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
	.setup_msi_irqs		= pnv_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
	.enable_device_hook	= pnv_pci_enable_device_hook,
	.window_alignment	= pnv_pci_window_alignment,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.shutdown		= pnv_pci_ioda_shutdown,
	.disable_device		= pnv_npu_disable_device,
};

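/* Controller callbacks for OpenCAPI NPU PHBs. */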
static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
	.enable_device_hook	= pnv_ocapi_enable_device_hook,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.shutdown		= pnv_pci_ioda_shutdown,
};

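/*
 * Common PHB initialisation: read the OPAL PHB id and PE counts from the
 * device tree, allocate the pci_controller and per-PE bookkeeping arrays,
 * set up segment maps, MSIs and controller callbacks, then reset the IODA
 * tables so the PHB starts from a clean state.
 */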
static void __init pnv_pci_init_ioda_phb(struct device_node *np,
					 u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m64map_off, m32map_off, pemap_off;
	unsigned long iomap_off = 0, dma32map_off = 0;
	struct resource r;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	unsigned int segno;
	u64 phb_id;
	void *aux;
	long rc;

	if (!of_device_is_available(np))
		return;

	pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
	if (!phb)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*phb));

	/* Allocate PCI controller */
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %pOF\n",
		       np);
		memblock_free(__pa(phb), sizeof(struct pnv_phb));
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %pOF\n", np);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;
	mutex_init(&phb->ioda.pe_alloc_mutex);

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
		phb->model = PNV_PHB_MODEL_NPU;
	else if (of_device_is_compatible(np, "ibm,power9-npu-pciex"))
		phb->model = PNV_PHB_MODEL_NPU2;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Initialize diagnostic data buffer */
	prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL);
	if (prop32)
		phb->diag_data_size = be32_to_cpup(prop32);
	else
		phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;

	phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
	if (!phb->diag_data)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      phb->diag_data_size);

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	if (!of_address_to_resource(np, 0, &r)) {
		phb->regs_phys = r.start;
		phb->regs = ioremap(r.start, resource_size(&r));
		if (phb->regs == NULL)
			pr_err(" Failed to map registers !\n");
	}

	/* Initialize more IODA stuff */
	phb->ioda.total_pe_num = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe_num = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);

	/* Invalidate RID to PE# mapping */
	for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
		phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;

	/* Parse 64-bit MMIO range */
	pnv_ioda_parse_m64_window(phb);

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW Has already off top 64k of M32 space (MSI space) */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.dma32_count = phb->ioda.m32_pci_base /
				PNV_IODA1_DMA32_SEGSIZE;

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
			sizeof(unsigned long));
	m64map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
	m32map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
		dma32map_off = size;
		size += phb->ioda.dma32_count *
			sizeof(phb->ioda.dma32_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
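	/*
	 * One allocation backs the PE allocation bitmap, the segment maps and
	 * the PE array; it is carved up below using the offsets computed above.
	 */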
	aux = memblock_alloc(size, SMP_CACHE_BYTES);
	if (!aux)
		panic("%s: Failed to allocate %lu bytes\n", __func__, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m64_segmap = aux + m64map_off;
	phb->ioda.m32_segmap = aux + m32map_off;
	for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
		phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
		phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
	}
	if (phb->type == PNV_PHB_IODA1) {
		phb->ioda.io_segmap = aux + iomap_off;
		for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
			phb->ioda.io_segmap[segno] = IODA_INVALID_PE;

		phb->ioda.dma32_segmap = aux + dma32map_off;
		for (segno = 0; segno < phb->ioda.dma32_count; segno++)
			phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
	}
	phb->ioda.pe_array = aux + pemap_off;

	/*
	 * Choose PE number for root bus, which shouldn't have
	 * M64 resources consumed by its child devices. To pick
	 * the PE number adjacent to the reserved one if possible.
	 */
	pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
	if (phb->ioda.reserved_pe_idx == 0) {
		phb->ioda.root_pe_idx = 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
		phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else {
		phb->ioda.root_pe_idx = IODA_INVALID_PE;
	}

	INIT_LIST_HEAD(&phb->ioda.pe_list);
	mutex_init(&phb->ioda.pe_list_mutex);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.dma32_count = phb->ioda.m32_pci_base /
				PNV_IODA1_DMA32_SEGSIZE;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
		phb->ioda.m32_size, phb->ioda.m32_segsize);
	if (phb->ioda.m64_size)
		pr_info("                 M64: 0x%lx [segment=0x%lx]\n",
			phb->ioda.m64_size, phb->ioda.m64_segsize);
	if (phb->ioda.io_size)
		pr_info("                  IO: 0x%x [segment=0x%x]\n",
			phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;
	phb->get_pe_state = pnv_ioda_get_pe_state;
	phb->freeze_pe = pnv_ioda_freeze_pe;
	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. It's supposed
	 * that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge bars so that each PCI bus (excluding
	 * the child P2P bridges) can form individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;

	switch (phb->type) {
	case PNV_PHB_NPU_NVLINK:
		hose->controller_ops = pnv_npu_ioda_controller_ops;
		break;
	case PNV_PHB_NPU_OCAPI:
		hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
		break;
	default:
		hose->controller_ops = pnv_pci_ioda_controller_ops;
	}

	ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;

#ifdef CONFIG_PCI_IOV
	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov;
	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
	ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
	ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
#endif

	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
	if (rc)
		pr_warn("  OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * If we're running in kdump kernel, the previous kernel never
	 * shutdown PCI devices correctly. We already got IODA table
	 * cleaned out. So we have to issue PHB reset to stop all PCI
	 * transactions from previous kernel. The ppc_pci_reset_phbs
	 * kernel parameter will force this reset too. Additionally,
	 * if the IODA reset above failed then use a bigger hammer.
	 * This can happen if we get a PHB fatal error in very early
	 * boot.
	 */
	if (is_kdump_kernel() || pci_reset_phbs || rc) {
		pr_info("  Issue PHB reset ...\n");
		pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
	}

	/* Remove M64 resource if we can't configure it successfully */
	if (!phb->init_m64 || phb->init_m64(phb))
		hose->mem_resources[1].flags = 0;
}

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_npu_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_NVLINK);
}

void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);
}

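/* Devices behind an OpenCAPI PHB get the full 4KB extended config space. */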
static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	if (!machine_is(powernv))
		return;

	if (phb->type == PNV_PHB_NPU_OCAPI)
		dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup);

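/* Probe an IODA IO-Hub node and initialise each child "ibm,ioda-phb" PHB. */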
void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %pOF\n", np);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}