/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"

#define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs	*/
#define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR	*/
#define PNV_IODA1_DMA32_SEGSIZE	0x10000000

#define POWERNV_IOMMU_DEFAULT_LEVELS	1
#define POWERNV_IOMMU_MAX_LEVELS	5

static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU" };
static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
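
/*
 * Print a message prefixed with the identity of the PE: the device name
 * for device PEs, the domain/bus for bus PEs, or the RID for VF PEs.
 */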
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.2x] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

static bool pnv_iommu_bypass_disabled __read_mostly;
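
/*
 * Parse the "iommu=" kernel parameter. Only the "nobypass" token is
 * handled here; it disables use of the IOMMU bypass window.
 */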
static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);

static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
{
	/*
	 * WARNING: We cannot rely on the resource flags. The Linux PCI
	 * allocation code sometimes decides to put a 64-bit prefetchable
	 * BAR in the 32-bit window, so we have to compare the addresses.
	 *
	 * For simplicity we only test resource start.
	 */
	return (r->start >= phb->ioda.m64_base &&
		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
}

static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
{
	unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);

	return (resource_flags & flags) == flags;
}

static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
	s64 rc;

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;

	/*
	 * Clear the PE frozen state as it might be put into frozen state
	 * in the last PCI remove path. It's not harmful to do so when the
	 * PE is already in unfrozen state.
	 */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);

	return &phb->ioda.pe_array[pe_no];
}

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
		pr_warn("%s: Invalid PE %x on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
		pr_debug("%s: PE %x was reserved on PHB#%x\n",
			 __func__, pe_no, phb->hose->global_number);

	pnv_ioda_init_pe(phb, pe_no);
}
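
/*
 * Allocate a free PE number. The bitmap is scanned from the top down,
 * presumably so that low PE numbers (which M64 segments map to
 * directly) are consumed last.
 */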
static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	long pe;

	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
		if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
			return pnv_ioda_init_pe(phb, pe);
	}

	return NULL;
}

static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	unsigned int pe_num = pe->pe_number;

	WARN_ON(pe->pdev);

	memset(pe, 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe_num, phb->ioda.pe_alloc);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/*
	 * Exclude the segments for the reserved and root bus PE, which
	 * are the first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		pr_warn("  Cannot strip M64 segment for reserved PE#%x\n",
			phb->ioda.reserved_pe_idx);

	return 0;

fail:
	pr_warn("  Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}
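
/*
 * Mark as in-use the PE numbers backing each M64 segment that one of
 * the device's BARs overlaps: bits are set in the caller's bitmap when
 * one is supplied, otherwise the PEs are reserved directly.
 */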
static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
					unsigned long *pe_bitmap)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct resource *r;
	resource_size_t base, sgsz, start, end;
	int segno, i;

	base = phb->ioda.m64_base;
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
		if (!r->parent || !pnv_pci_is_m64(phb, r))
			continue;

		start = _ALIGN_DOWN(r->start - base, sgsz);
		end = _ALIGN_UP(r->end - base, sgsz);
		for (segno = start / sgsz; segno < end / sgsz; segno++) {
			if (pe_bitmap)
				set_bit(segno, pe_bitmap);
			else
				pnv_ioda_reserve_pe(phb, segno);
		}
	}
}

static int pnv_ioda1_init_m64(struct pnv_phb *phb)
{
	struct resource *r;
	int index;

	/*
	 * There are 16 M64 BARs, each of which has 8 segments. So
	 * there are as many M64 segments as the maximum number of
	 * PEs, which is 128.
	 */
	for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
		unsigned long base, segsz = phb->ioda.m64_segsize;
		int64_t rc;

		base = phb->ioda.m64_base +
		       index * PNV_IODA1_M64_SEGS * segsz;
		rc = opal_pci_set_phb_mem_window(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index, base, 0,
				PNV_IODA1_M64_SEGS * segsz);
		if (rc != OPAL_SUCCESS) {
			pr_warn("  Error %lld setting M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}

		rc = opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index,
				OPAL_ENABLE_M64_SPLIT);
		if (rc != OPAL_SUCCESS) {
			pr_warn("  Error %lld enabling M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}
	}

	/*
	 * Exclude the segments for the reserved and root bus PE, which
	 * are the first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
		     phb->ioda.reserved_pe_idx, phb->hose->global_number);

	return 0;

fail:
	for ( ; index >= 0; index--)
		opal_pci_phb_mmio_enable(phb->opal_id,
			OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);

	return -EIO;
}

static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
				    unsigned long *pe_bitmap,
				    bool all)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list) {
		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);

		if (all && pdev->subordinate)
			pnv_ioda_reserve_m64_pe(pdev->subordinate,
						pe_bitmap, all);
	}
}
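
/*
 * Collect the PE numbers pinned by the bus's M64 segments and group
 * them into a compound PE: the first becomes the master, the rest are
 * chained onto its slave list. Returns the master PE, or NULL when the
 * bus owns no M64 segments.
 */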
static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	int i;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return NULL;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return NULL;
	}

	/* Figure out reserved PE numbers by the PE */
	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * The current bus might not own any M64 windows itself; they may
	 * all be contributed by its child buses. In that case there is no
	 * M64-dependent PE# to pick.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
		kfree(pe_alloc);
		return NULL;
	}

	/*
	 * Figure out the master PE and put all slave PEs to master
	 * PE's list to form compound PE.
	 */
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
		phb->ioda.total_pe_num) {
		pe = &phb->ioda.pe_array[i];

		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}

		/*
		 * P7IOC supports M64DT, which helps mapping M64 segment
		 * to one particular PE#. However, PHB3 has fixed mapping
		 * between M64 segment and PE#. In order to have same logic
		 * for P7IOC and PHB3, we enforce fixed mapping between M64
		 * segment and PE# on P7IOC.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			int64_t rc;

			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M64_WINDOW_TYPE,
					pe->pe_number / PNV_IODA1_M64_SEGS,
					pe->pe_number % PNV_IODA1_M64_SEGS);
			if (rc != OPAL_SUCCESS)
				pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
					__func__, rc, phb->hose->global_number,
					pe->pe_number);
		}
	}

	kfree(pe_alloc);
	return master_pe;
}

static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	u32 m64_range[2], i;
	const __be32 *r;
	u64 pci_addr;

	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
		pr_info("  M64 window is not supported\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_info("  Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info("  No <ibm,opal-m64-window> on %s\n",
			dn->full_name);
		return;
	}

	/*
	 * Find the available M64 BAR range and pick the last one to
	 * cover the whole 64-bit space. We support only one range.
	 */
	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
				       m64_range, 2)) {
		/* In absence of the property, assume 0..15 */
		m64_range[0] = 0;
		m64_range[1] = 16;
	}
	/* We only support 64 bits in our allocator */
	if (m64_range[1] > 63) {
		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
			__func__, m64_range[1], phb->hose->global_number);
		m64_range[1] = 63;
	}
	/* Empty range, no m64 */
	if (m64_range[1] <= m64_range[0]) {
		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
			__func__, phb->hose->global_number);
		return;
	}

	/* Configure M64 information */
	res = &hose->mem_resources[1];
	res->name = dn->full_name;
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
	phb->ioda.m64_base = pci_addr;

	/* This lines up nicely with the display from processing OF ranges */
	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
		res->start, res->end, pci_addr, m64_range[0],
		m64_range[0] + m64_range[1] - 1);

	/* Mark all M64 used up by default */
	phb->ioda.m64_bar_alloc = (unsigned long)-1;

	/* Use last M64 BAR to cover M64 window */
	m64_range[1]--;
	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];

	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);

	/* Mark remaining ones free */
	for (i = m64_range[0]; i < m64_range[1]; i++)
		clear_bit(i, &phb->ioda.m64_bar_alloc);

	/*
	 * Setup init functions for M64 based on IODA version, IODA3 uses
	 * the IODA2 code.
	 */
	if (phb->type == PNV_PHB_IODA1)
		phb->init_m64 = pnv_ioda1_init_m64;
	else
		phb->init_m64 = pnv_ioda2_init_m64;
	phb->reserve_m64_pe = pnv_ioda_reserve_m64_pe;
	phb->pick_m64_pe = pnv_ioda_pick_m64_pe;
}
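
/*
 * Freeze a PE for EEH purposes. For compound PEs the freeze is applied
 * to the master PE first and then propagated to every slave on its list.
 */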
static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number,
					       opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}
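
/*
 * Return the EEH freeze state of a PE. For compound PEs every slave is
 * queried as well and the most severe (highest) state wins.
 */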
static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate, state;
	__be16 pcierr;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; note the PE instance might not be
	 * initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting "
			"PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PE */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting "
				"PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

/* Currently those 2 are only used when MSIs are enabled, this will change
 * but in the meantime, we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */
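
/*
 * Add or remove a single parent/child association in the PELT-V. For a
 * compound child, the parent is associated with each of the child's
 * slaves as well.
 */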
static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear PE frozen state. If it's a master PE, we need to
	 * clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
							  slave->pe_number,
							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well; otherwise, an error
	 * originating from the PE might spread to other PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}

static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently we only deconfigure VF PEs. Bus PEs are always there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;         break;
		case  2: bcomp = OpalPciBus7Bits;       break;
		case  4: bcomp = OpalPciBus6Bits;       break;
		case  8: bcomp = OpalPciBus5Bits;       break;
		case 16: bcomp = OpalPciBus4Bits;       break;
		case 32: bcomp = OpalPciBus3Bits;       break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;

	/* Release from all parents PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
#ifdef CONFIG_PCI_IOV
	pe->parent_dev = NULL;
#endif

	return 0;
}
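
/*
 * Program a PE into the PHB: map its RID range with opal_pci_set_pe(),
 * configure the PELT-V (except on NPUs, which have none), fill in the
 * reverse map and, on IODA1 only, bind and enable an MVE for MSIs.
 */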
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif /* CONFIG_PCI_IOV */
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate PE in PELT. We need to add the PE to the
	 * corresponding PELT-V as well; otherwise, an error
	 * originating from the PE might spread to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
	if (phb->type != PNV_PHB_NPU)
		pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct pci_dn *pdn = pci_get_pdn(dev);
	int i;
	struct resource *res, res2;
	resource_size_t size;
	u16 num_vfs;

	if (!dev->is_physfn)
		return -EINVAL;

	/*
	 * "offset" is in VFs.  The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number.  Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs BAR.  This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * After doing so, there will be a "hole" in /proc/iomem when
	 * offset is positive. It looks as if the device returned some
	 * MMIO space to the system that in fact nobody can use.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
			 i, &res2, res, (offset > 0) ? "En" : "Dis",
			 num_vfs, offset);
		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
	}
	return 0;
}
#endif /* CONFIG_PCI_IOV */

static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	pe = pnv_ioda_alloc_pe(phb);
	if (!pe) {
		pr_warn("%s: Not enough PE# available, disabling device\n",
			pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe->pe_number;
	pe->flags = PNV_IODA_PE_DEV;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}
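
/*
 * Attach every device on the bus (and, for bus-all PEs, on its
 * subordinate buses) to the given PE, skipping devices that a partial
 * hotplug pass has already associated with a PE.
 */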
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}

		/*
		 * In the partial hotplug case, the PCI device might still
		 * be associated with the PE and needn't be attached to it
		 * again.
		 */
		if (pdn->pe_number != IODA_INVALID_PE)
			continue;

		pe->device_count++;
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There are two types of PCI-bus-sensitive PEs: one comprises a
 * single PCI bus; the other contains the primary PCI bus and its
 * subordinate PCI devices and buses. The second type of PE normally
 * originates from a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe = NULL;
	unsigned int pe_num;

	/*
	 * In the partial hotplug case, the PE instance might still be
	 * alive. We should reuse it instead of allocating a new one.
	 */
	pe_num = phb->ioda.pe_rmap[bus->number << 8];
	if (pe_num != IODA_INVALID_PE) {
		pe = &phb->ioda.pe_array[pe_num];
		pnv_ioda_setup_same_PE(bus, pe);
		return NULL;
	}

	/* PE number for root bus should have been reserved */
	if (pci_is_root_bus(bus) &&
	    phb->ioda.root_pe_idx != IODA_INVALID_PE)
		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];

	/* Check if PE is determined by M64 */
	if (!pe && phb->pick_m64_pe)
		pe = phb->pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
	if (!pe)
		pe = pnv_ioda_alloc_pe(phb);

	if (!pe) {
		pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			__func__, pci_domain_nr(bus), bus->number);
		return NULL;
	}

	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n",
			bus->busn_res.start, bus->busn_res.end, pe->pe_number);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%x\n",
			bus->busn_res.start, pe->pe_number);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pe->pbus = NULL;
		return NULL;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}

static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
{
	int pe_num, found_pe = false, rc;
	long rid;
	struct pnv_ioda_pe *pe;
	struct pci_dev *gpu_pdev;
	struct pci_dn *npu_pdn;
	struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/*
	 * Due to a hardware erratum PE#0 on the NPU is reserved for
	 * error handling. This means we only have three PEs remaining
	 * which need to be assigned to four links, implying some
	 * links must share PEs.
	 *
	 * To achieve this we assign PEs such that NPUs linking the
	 * same GPU get assigned the same PE.
	 */
	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
		pe = &phb->ioda.pe_array[pe_num];
		if (!pe->pdev)
			continue;

		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
			/*
			 * This device has the same peer GPU so should
			 * be assigned the same PE as the existing
			 * peer NPU.
			 */
			dev_info(&npu_pdev->dev,
				"Associating to existing PE %x\n", pe_num);
			pci_dev_get(npu_pdev);
			npu_pdn = pci_get_pdn(npu_pdev);
			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
			npu_pdn->pcidev = npu_pdev;
			npu_pdn->pe_number = pe_num;
			phb->ioda.pe_rmap[rid] = pe->pe_number;

			/* Map the PE to this link */
			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
				OpalPciBusAll,
				OPAL_COMPARE_RID_DEVICE_NUMBER,
				OPAL_COMPARE_RID_FUNCTION_NUMBER,
				OPAL_MAP_PE);
			WARN_ON(rc != OPAL_SUCCESS);
			found_pe = true;
			break;
		}
	}

	if (!found_pe)
		/*
		 * Could not find an existing PE so allocate a new
		 * one.
		 */
		return pnv_ioda_setup_dev_PE(npu_pdev);
	else
		return pe;
}

static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list)
		pnv_ioda_setup_npu_PE(pdev);
}

static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		if (phb->type == PNV_PHB_NPU) {
			/* PE#0 is needed for error reporting */
			pnv_ioda_reserve_pe(phb, 0);
			pnv_ioda_setup_npu_PEs(hose->bus);
		}
	}
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus        *bus;
	struct pci_controller *hose;
	struct pnv_phb        *phb;
	struct pci_dn         *pdn;
	int                    i, j;
	int                    m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < m64_bars; j++) {
			if (pdn->m64_map[j][i] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
			clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
			pdn->m64_map[j][i] = IODA_INVALID_M64;
		}

	kfree(pdn->m64_map);
	return 0;
}
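
/*
 * Allocate and program M64 BARs covering the VF BARs: one shared BAR
 * per IOV resource in segmented mode, or one BAR per VF in single mode.
 * On any failure, everything claimed so far is released again.
 */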
static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus        *bus;
	struct pci_controller *hose;
	struct pnv_phb        *phb;
	struct pci_dn         *pdn;
	unsigned int           win;
	struct resource       *res;
	int                    i, j;
	int64_t                rc;
	int                    total_vfs;
	resource_size_t        size, start;
	int                    pe_num;
	int                    m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	pdn->m64_map = kmalloc(sizeof(*pdn->m64_map) * m64_bars, GFP_KERNEL);
	if (!pdn->m64_map)
		return -ENOMEM;
	/* Initialize the m64_map to IODA_INVALID_M64 */
	for (i = 0; i < m64_bars ; i++)
		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
			pdn->m64_map[i][j] = IODA_INVALID_M64;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		for (j = 0; j < m64_bars; j++) {
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
						phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_map[j][i] = win;

			if (pdn->m64_single_mode) {
				size = pci_iov_resource_size(pdev,
							PCI_IOV_RESOURCES + i);
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (pdn->m64_single_mode) {
				pe_num = pdn->pe_num_map[j];
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_map[j][i], 0);
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						 OPAL_M64_WINDOW_TYPE,
						 pdn->m64_map[j][i],
						 start,
						 0, /* unused */
						 size);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (pdn->m64_single_mode)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
					OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
					OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);
	return -EBUSY;
}

static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
		int num);
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);

static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
	struct iommu_table    *tbl;
	int64_t               rc;

	tbl = pe->table_group.tables[0];
	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		BUG_ON(pe->table_group.group);
	}
	pnv_pci_ioda2_table_free_pages(tbl);
	iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
}
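
/*
 * Tear down every VF PE hanging off the given physical function:
 * release its DMA setup, unlink it from the PHB's PE list, deconfigure
 * it in OPAL and return the PE number to the allocator.
 */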
2015-10-22 09:22:16 +08:00
|
|
|
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
|
2015-03-25 16:23:57 +08:00
|
|
|
{
|
|
|
|
struct pci_bus *bus;
|
|
|
|
struct pci_controller *hose;
|
|
|
|
struct pnv_phb *phb;
|
|
|
|
struct pnv_ioda_pe *pe, *pe_n;
|
|
|
|
struct pci_dn *pdn;
|
|
|
|
|
|
|
|
bus = pdev->bus;
|
|
|
|
hose = pci_bus_to_host(bus);
|
|
|
|
phb = hose->private_data;
|
2015-03-25 16:23:59 +08:00
|
|
|
pdn = pci_get_pdn(pdev);
|
2015-03-25 16:23:57 +08:00
|
|
|
|
|
|
|
if (!pdev->is_physfn)
|
|
|
|
return;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
|
|
|
|
if (pe->parent_dev != pdev)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
pnv_pci_ioda2_release_dma_pe(pdev, pe);
|
|
|
|
|
|
|
|
/* Remove from list */
|
|
|
|
mutex_lock(&phb->ioda.pe_list_mutex);
|
|
|
|
list_del(&pe->list);
|
|
|
|
mutex_unlock(&phb->ioda.pe_list_mutex);
|
|
|
|
|
|
|
|
pnv_ioda_deconfigure_pe(phb, pe);
|
|
|
|
|
2016-05-03 13:41:36 +08:00
|
|
|
pnv_ioda_free_pe(pe);
|
2015-03-25 16:23:57 +08:00
|
|
|
}
|
|
|
|
}
void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;
	struct pci_sriov *iov;
	u16 num_vfs, i;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	iov = pdev->sriov;
	num_vfs = pdn->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->m64_single_mode)
			pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev, num_vfs);

		/* Release PE numbers */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				if (pdn->pe_num_map[i] == IODA_INVALID_PE)
					continue;

				pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
				pnv_ioda_free_pe(pe);
			}
		} else
			bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
		/* Releasing pe_num_map */
		kfree(pdn->pe_num_map);
	}
}

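/*
 * The VF PE setup below derives each VF's RID from the standard
 * bus/devfn encoding: for example, a VF at bus 0x21, devfn 0x04 yields
 * pe->rid == 0x2104. pci_iov_virtfn_bus() and pci_iov_virtfn_devfn()
 * compute both values from the PF's SR-IOV capability and the VF index.
 */
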
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	int pe_num;
	u16 vf_index;
	struct pci_dn *pdn;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		if (pdn->m64_single_mode)
			pe_num = pdn->pe_num_map[vf_index];
		else
			pe_num = *pdn->pe_num_map + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->mve_number = -1;
		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
			   pci_iov_virtfn_devfn(pdev, vf_index);

		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
			hose->global_number, pdev->bus->number,
			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			pnv_ioda_free_pe(pe);
			pe->pdev = NULL;
			continue;
		}

		/* Put PE to the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
	}
}

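/*
 * Enabling SR-IOV on IODA2 proceeds in four steps: reserve PE numbers
 * (one per VF in single PE mode, one contiguous range otherwise),
 * assign M64 windows, shift the IOV BAR when a shared window is used,
 * and finally create the VF PEs. Failure paths unwind the PE
 * reservation through the m64_failed label.
 */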
int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;
	int ret;
	u16 i;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		if (!pdn->vfs_expanded) {
			dev_info(&pdev->dev, "don't support this SRIOV device"
				" with non 64bit-prefetchable IOV BAR\n");
			return -ENOSPC;
		}

		/*
		 * When the M64 BARs function in Single PE mode, the number
		 * of VFs that can be enabled must be less than the number
		 * of M64 BARs.
		 */
		if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
			dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n");
			return -EBUSY;
		}

		/* Allocating pe_num_map */
		if (pdn->m64_single_mode)
			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map) * num_vfs,
					GFP_KERNEL);
		else
			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);

		if (!pdn->pe_num_map)
			return -ENOMEM;

		if (pdn->m64_single_mode)
			for (i = 0; i < num_vfs; i++)
				pdn->pe_num_map[i] = IODA_INVALID_PE;

		/* Calculate available PE for required VFs */
		if (pdn->m64_single_mode) {
			for (i = 0; i < num_vfs; i++) {
				pe = pnv_ioda_alloc_pe(phb);
				if (!pe) {
					ret = -EBUSY;
					goto m64_failed;
				}

				pdn->pe_num_map[i] = pe->pe_number;
			}
		} else {
			mutex_lock(&phb->ioda.pe_alloc_mutex);
			*pdn->pe_num_map = bitmap_find_next_zero_area(
				phb->ioda.pe_alloc, phb->ioda.total_pe_num,
				0, num_vfs, 0);
			if (*pdn->pe_num_map >= phb->ioda.total_pe_num) {
				mutex_unlock(&phb->ioda.pe_alloc_mutex);
				dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
				kfree(pdn->pe_num_map);
				return -EBUSY;
			}
			bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
			mutex_unlock(&phb->ioda.pe_alloc_mutex);
		}
		pdn->num_vfs = num_vfs;

		/* Assign M64 window accordingly */
		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
		if (ret) {
			dev_info(&pdev->dev, "Not enough M64 window resources\n");
			goto m64_failed;
		}

		/*
		 * When using one M64 BAR to map one IOV BAR, we need to shift
		 * the IOV BAR according to the PE# allocated to the VFs.
		 * Otherwise, the PE# for the VF will conflict with others.
		 */
		if (!pdn->m64_single_mode) {
			ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
			if (ret)
				goto m64_failed;
		}
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

m64_failed:
	if (pdn->m64_single_mode) {
		for (i = 0; i < num_vfs; i++) {
			if (pdn->pe_num_map[i] == IODA_INVALID_PE)
				continue;

			pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
			pnv_ioda_free_pe(pe);
		}
	} else
		bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);

	/* Releasing pe_num_map */
	kfree(pdn->pe_num_map);

	return ret;
}

int pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_dev_pci_data(pdev);
	return 0;
}

int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_dev_pci_data(pdev);

	return pnv_pci_sriov_enable(pdev, num_vfs);
}
#endif /* CONFIG_PCI_IOV */

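/*
 * Per-device DMA setup: point the device at its PE's 32-bit TCE table
 * and record the bypass offset so that a later dma_set_mask() call can
 * switch the device to direct DMA.
 */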
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * This function can be called before the PE# has been
	 * assigned. Do nothing in that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	/*
	 * Note: iommu_add_device() will fail here as
	 * for physical PE: the device is already added by now;
	 * for virtual PE: sysfs entries are not ready yet and
	 * tce_iommu_bus_notifier will add the device to a group later.
	 */
}

static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	uint64_t top;
	bool bypass = false;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return -ENODEV;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
	}

	if (bypass) {
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_direct_ops);
	} else {
		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);
	}
	*pdev->dev.dma_mask = dma_mask;

	/* Update peer npu devices */
	pnv_npu_try_dma_set_bypass(pdev, bypass);

	return 0;
}

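/*
 * Example of the bypass decision above, assuming 4GB of RAM and
 * tce_bypass_base = 1ull << 59: top is 0x08000000ffffffff, so a device
 * setting DMA_BIT_MASK(64) is switched to direct DMA while a 32-bit or
 * 48-bit mask keeps the iommu ops.
 */
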
static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	u64 end, mask;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return 0;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (!pe->tce_bypass_enabled)
		return __dma_get_required_mask(&pdev->dev);

	end = pe->tce_bypass_base + memblock_end_of_DRAM();
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

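/*
 * Worked example for the mask computation above, again assuming 4GB of
 * RAM: end = (1ull << 59) + 0x100000000 = 0x0800000100000000 and
 * fls64(end) is 60, so the function returns (1ull << 60) - 1, i.e. a
 * 60-bit mask is required to reach the bypass window.
 */
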
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
				   struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
		set_dma_offset(&dev->dev, pe->tce_bypass_base);
		iommu_add_device(&dev->dev);

		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}

static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
						     bool real_mode)
{
	return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
		(phb->regs + 0x210);
}

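/*
 * Offset 0x210 is the TCE kill register in the PHB register space;
 * real mode callers (e.g. KVM real-mode hcalls) must use the physical
 * alias because the ioremapped address is unusable there.
 */
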
static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl = list_first_entry_or_null(
			&tbl->it_group_list, struct iommu_table_group_link,
			next);
	struct pnv_ioda_pe *pe = container_of(tgl->table_group,
			struct pnv_ioda_pe, table_group);
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;

	start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
	end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
			npages - 1);

	/* p7ioc-style invalidation, 2 TCEs per write */
	start |= (1ull << 63);
	end |= (1ull << 63);
	inc = 16;
	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);

	return ret;
}

#ifdef CONFIG_IOMMU_API
static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret = pnv_tce_xchg(tbl, index, hpa, direction);

	if (!ret)
		pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);

	return ret;
}
#endif

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
}

static struct iommu_table_ops pnv_ioda1_iommu_ops = {
	.set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API
	.exchange = pnv_ioda1_tce_xchg,
#endif
	.clear = pnv_ioda1_tce_free,
	.get = pnv_tce_get,
};

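/*
 * The PHB3 TCE kill register encodes the invalidation scope in its top
 * bits. PPC_BIT() counts from the most significant bit, so the three
 * scopes below are bits 63, 62 and 61 of the register respectively.
 */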
#define PHB3_TCE_KILL_INVAL_ALL		PPC_BIT(0)
#define PHB3_TCE_KILL_INVAL_PE		PPC_BIT(1)
#define PHB3_TCE_KILL_INVAL_ONE		PPC_BIT(2)

void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
{
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
	const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;

	mb(); /* Ensure previous TCE table stores are visible */
	if (rm)
		__raw_rm_writeq(cpu_to_be64(val), invalidate);
	else
		__raw_writeq(cpu_to_be64(val), invalidate);
}

static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	/* 01xb - invalidate TCEs that match the specified PE# */
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
	unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);

	mb(); /* Ensure above stores are visible */
	__raw_writeq(cpu_to_be64(val), invalidate);
}

static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
					unsigned shift, unsigned long index,
					unsigned long npages)
{
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;

	/* We'll invalidate DMA address in PE scope */
	start = PHB3_TCE_KILL_INVAL_ONE;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	start |= (index << shift);
	end |= ((index + npages - 1) << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}
}

static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
		pnv_pci_phb3_tce_invalidate_pe(pe);
	else
		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
				  pe->pe_number, 0, 0, 0);
}

static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages, bool rm)
{
	struct iommu_table_group_link *tgl;

	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
				struct pnv_ioda_pe, table_group);
		struct pnv_phb *phb = pe->phb;
		unsigned int shift = tbl->it_page_shift;

		if (phb->type == PNV_PHB_NPU) {
			/*
			 * The NVLink hardware does not support TCE kill
			 * per TCE entry so we have to invalidate
			 * the entire cache for it.
			 */
			pnv_pci_phb3_tce_invalidate_entire(phb, rm);
			continue;
		}
		if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
			pnv_pci_phb3_tce_invalidate(pe, rm, shift,
						    index, npages);
		else if (rm)
			opal_rm_pci_tce_kill(phb->opal_id,
					     OPAL_PCI_TCE_KILL_PAGES,
					     pe->pe_number, 1u << shift,
					     index << shift, npages);
		else
			opal_pci_tce_kill(phb->opal_id,
					  OPAL_PCI_TCE_KILL_PAGES,
					  pe->pe_number, 1u << shift,
					  index << shift, npages);
	}
}

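/*
 * Note the fallback ladder above: NPU PEs can only kill the whole TCE
 * cache, PHB3 with mapped registers uses the MMIO kill register, and
 * everything else goes through the OPAL TCE kill call (the _rm variant
 * in real mode).
 */
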
static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);

	return ret;
}

#ifdef CONFIG_IOMMU_API
static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret = pnv_tce_xchg(tbl, index, hpa, direction);

	if (!ret)
		pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);

	return ret;
}
#endif

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}

static void pnv_ioda2_table_free(struct iommu_table *tbl)
{
	pnv_pci_ioda2_table_free_pages(tbl);
	iommu_free_table(tbl, "pnv");
}

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
	.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
	.exchange = pnv_ioda2_tce_xchg,
#endif
	.clear = pnv_ioda2_tce_free,
	.get = pnv_tce_get,
	.free = pnv_ioda2_table_free,
};

static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
{
	unsigned int *weight = (unsigned int *)data;

	/*
	 * This is quite simplistic. The "base" weight of a device is 10;
	 * a weight of 0 means no DMA is to be accounted for it.
	 */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		*weight += 3;
	else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		*weight += 15;
	else
		*weight += 10;

	return 0;
}

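/*
 * The callback accumulates into *data, so it composes directly with
 * pci_walk_bus(), e.g.:
 *
 *	unsigned int weight = 0;
 *
 *	pci_walk_bus(bus, pnv_pci_ioda_dev_dma_weight, &weight);
 */
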
static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
{
	unsigned int weight = 0;

	/* SRIOV VF has same DMA32 weight as its PF */
#ifdef CONFIG_PCI_IOV
	if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
		pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
		return weight;
	}
#endif

	if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
		pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
	} else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
		struct pci_dev *pdev;

		list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
			pnv_pci_ioda_dev_dma_weight(pdev, &weight);
	} else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
		pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
	}

	return weight;
}

static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	struct iommu_table *tbl;
	unsigned int weight, total_weight = 0;
	unsigned int tce32_segsz, base, segs, avail, i;
	int64_t rc;
	void *addr;

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */
	weight = pnv_pci_ioda_pe_dma_weight(pe);
	if (!weight)
		return;

	pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight,
		     &total_weight);
	segs = (weight * phb->ioda.dma32_count) / total_weight;
	if (!segs)
		segs = 1;

	/*
	 * Allocate contiguous DMA32 segments. We begin with the expected
	 * number of segments. If that fails, the number of DMA32 segments
	 * requested is decreased by one on each attempt until a single
	 * segment is allocated successfully.
	 */
	do {
		for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
			for (avail = 0, i = base; i < base + segs; i++) {
				if (phb->ioda.dma32_segmap[i] ==
				    IODA_INVALID_PE)
					avail++;
			}

			if (avail == segs)
				goto found;
		}
	} while (--segs);

	if (!segs) {
		pe_warn(pe, "No available DMA32 segments\n");
		return;
	}

found:
	tbl = pnv_pci_table_alloc(phb->hose->node);
	iommu_register_group(&pe->table_group, phb->hose->global_number,
			pe->pe_number);
	pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);

	/* Grab a 32-bit TCE table */
	pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
		weight, total_weight, base, segs);
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		base * PNV_IODA1_DMA32_SEGSIZE,
		(base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 *
	 * Each TCE page is 4KB in size and each TCE entry occupies 8
	 * bytes
	 */
	tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce32_segsz * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce32_segsz * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + tce32_segsz * i,
						tce32_segsz, IOMMU_PAGE_SIZE_4K);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup DMA32 segment mapping */
	for (i = base; i < base + segs; i++)
		phb->ioda.dma32_segmap[i] = pe->pe_number;

	/* Setup linux iommu table */
	pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
				  base * PNV_IODA1_DMA32_SEGSIZE,
				  IOMMU_PAGE_SHIFT_4K);

	tbl->it_ops = &pnv_ioda1_iommu_ops;
	pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
	pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
	iommu_init_table(tbl, phb->hose->node);

	if (pe->flags & PNV_IODA_PE_DEV) {
		/*
		 * Setting table base here only for carrying iommu_group
		 * further down to let iommu_add_device() do the job.
		 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
		 */
		set_iommu_table_base(&pe->pdev->dev, tbl);
		iommu_add_device(&pe->pdev->dev);
	} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce32_segsz * segs));
	if (tbl) {
		pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
		iommu_free_table(tbl, "pnv");
	}
}

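/*
 * IODA2 gives each PE two TVEs selected by PCI address bit 59: TVE
 * (pe_number << 1) + 0 backs the 32-bit window configured below and
 * TVE (pe_number << 1) + 1 is the 64-bit bypass window, so PE#5 for
 * example uses TVEs 10 and 11.
 */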
static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = pe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
			tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num,
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bits DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			pe->pe_number,
			(pe->pe_number << 1) + num,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(pe, "Failed to configure TCE table, err %ld\n", rc);
		return rc;
	}

	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &pe->table_group);
	pnv_pci_ioda2_tce_invalidate_pe(pe);

	return 0;
}

static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
	uint16_t window_id = (pe->pe_number << 1) + 1;
	int64_t rc;

	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
	if (enable) {
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     top);
	} else {
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     0);
	}
	if (rc)
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	else
		pe->tce_bypass_enabled = enable;
}

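/*
 * Window 0 of a table group starts at tce32_start on the bus while any
 * additional window is placed at the bypass base; this is the only
 * place the bus offset is chosen, see the "num ? :" expression below.
 */
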
static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
		__u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table *tbl);

static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	int nid = pe->phb->hose->node;
	__u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
	long ret;
	struct iommu_table *tbl;

	tbl = pnv_pci_table_alloc(nid);
	if (!tbl)
		return -ENOMEM;

	ret = pnv_pci_ioda2_table_alloc_pages(nid,
			bus_offset, page_shift, window_size,
			levels, tbl);
	if (ret) {
		iommu_free_table(tbl, "pnv");
		return ret;
	}

	tbl->it_ops = &pnv_ioda2_iommu_ops;

	*ptbl = tbl;

	return 0;
}

static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = NULL;
	long rc;

	/*
	 * crashkernel= specifies the kdump kernel's maximum memory at
	 * some offset and there is no guarantee the result is a power
	 * of 2, which will cause errors later.
	 */
	const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());

	/*
	 * In memory constrained environments, e.g. kdump kernel, the
	 * DMA window can be larger than available memory, which will
	 * cause errors later.
	 */
	const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);

	rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
			IOMMU_PAGE_SHIFT_4K,
			window_size,
			POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
	if (rc) {
		pe_err(pe, "Failed to create 32-bit TCE table, err %ld\n",
				rc);
		return rc;
	}

	iommu_init_table(tbl, pe->phb->hose->node);

	rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
				rc);
		pnv_ioda2_table_free(tbl);
		return rc;
	}

	if (!pnv_iommu_bypass_disabled)
		pnv_pci_ioda2_set_bypass(pe, true);

	/*
	 * Setting table base here only for carrying iommu_group
	 * further down to let iommu_add_device() do the job.
	 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
	 */
	if (pe->flags & PNV_IODA_PE_DEV)
		set_iommu_table_base(&pe->pdev->dev, tbl);

	return 0;
}

#if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
		int num)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = pe->phb;
	long ret;

	pe_info(pe, "Removing DMA window #%d\n", num);

	ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
			(pe->pe_number << 1) + num,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (ret)
		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
	else
		pnv_pci_ioda2_tce_invalidate_pe(pe);

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);

	return ret;
}
#endif

#ifdef CONFIG_IOMMU_API
static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
		__u64 window_size, __u32 levels)
{
	unsigned long bytes = 0;
	const unsigned window_shift = ilog2(window_size);
	unsigned entries_shift = window_shift - page_shift;
	unsigned table_shift = entries_shift + 3;
	unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
	unsigned long direct_table_size;

	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
			(window_size > memory_hotplug_max()) ||
			!is_power_of_2(window_size))
		return 0;

	/* Calculate a direct table size from window_size and levels */
	entries_shift = (entries_shift + levels - 1) / levels;
	table_shift = entries_shift + 3;
	table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
	direct_table_size = 1UL << table_shift;

	for ( ; levels; --levels) {
		bytes += _ALIGN_UP(tce_table_size, direct_table_size);

		tce_table_size /= direct_table_size;
		tce_table_size <<= 3;
		tce_table_size = _ALIGN_UP(tce_table_size, direct_table_size);
	}

	return bytes;
}

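/*
 * Size estimate example for the function above, assuming 4K kernel
 * pages: a 1GB window with 4K IOMMU pages needs 2^18 TCEs, so with
 * levels = 1 a single 2MB table suffices and get_table_size() returns
 * 2MB.
 */
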
static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
						table_group);
	/* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
	struct iommu_table *tbl = pe->table_group.tables[0];

	pnv_pci_ioda2_set_bypass(pe, false);
	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	pnv_ioda2_table_free(tbl);
}

static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
						table_group);

	pnv_pci_ioda2_setup_default_config(pe);
}

static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_pci_ioda2_create_table,
	.set_window = pnv_pci_ioda2_set_window,
	.unset_window = pnv_pci_ioda2_unset_window,
	.take_ownership = pnv_ioda2_take_ownership,
	.release_ownership = pnv_ioda2_release_ownership,
};

static int gpe_table_group_to_npe_cb(struct device *dev, void *opaque)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe **ptmppe = opaque;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	struct pci_dn *pdn = pci_get_pdn(pdev);

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return 0;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	if (phb->type != PNV_PHB_NPU)
		return 0;

	*ptmppe = &phb->ioda.pe_array[pdn->pe_number];

	return 1;
}

/*
 * This returns the PE of the associated NPU.
 * This assumes that the NPU is in the same IOMMU group as the GPU and
 * that there are no other PEs.
 */
static struct pnv_ioda_pe *gpe_table_group_to_npe(
		struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = NULL;
	int ret = iommu_group_for_each_dev(table_group->group, &npe,
			gpe_table_group_to_npe_cb);

	BUG_ON(!ret || !npe);

	return npe;
}

static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	long ret = pnv_pci_ioda2_set_window(table_group, num, tbl);

	if (ret)
		return ret;

	ret = pnv_npu_set_window(gpe_table_group_to_npe(table_group), num, tbl);
	if (ret)
		pnv_pci_ioda2_unset_window(table_group, num);

	return ret;
}

static long pnv_pci_ioda2_npu_unset_window(
		struct iommu_table_group *table_group,
		int num)
{
	long ret = pnv_pci_ioda2_unset_window(table_group, num);

	if (ret)
		return ret;

	return pnv_npu_unset_window(gpe_table_group_to_npe(table_group), num);
}

static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group)
{
	/*
	 * Detach NPU first as pnv_ioda2_take_ownership() will destroy
	 * the iommu_table if 32bit DMA is enabled.
	 */
	pnv_npu_take_ownership(gpe_table_group_to_npe(table_group));
	pnv_ioda2_take_ownership(table_group);
}

static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_pci_ioda2_create_table,
	.set_window = pnv_pci_ioda2_npu_set_window,
	.unset_window = pnv_pci_ioda2_npu_unset_window,
	.take_ownership = pnv_ioda2_npu_take_ownership,
	.release_ownership = pnv_ioda2_release_ownership,
};

static void pnv_pci_ioda_setup_iommu_api(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *gpe;

	/*
	 * Now we have all PHBs discovered, time to add NPU devices to
	 * the corresponding IOMMU groups.
	 */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->type != PNV_PHB_NPU)
			continue;

		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			gpe = pnv_pci_npu_setup_iommu(pe);
			if (gpe)
				gpe->table_group.ops = &pnv_pci_ioda2_npu_ops;
		}
	}
}
#else /* !CONFIG_IOMMU_API */
static void pnv_pci_ioda_setup_iommu_api(void) { }
#endif

static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
		unsigned levels, unsigned long limit,
		unsigned long *current_offset, unsigned long *total_allocated)
{
	struct page *tce_mem = NULL;
	__be64 *addr, *tmp;
	unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
	unsigned long allocated = 1UL << (order + PAGE_SHIFT);
	unsigned entries = 1UL << (shift - 3);
	long i;

	tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
	if (!tce_mem) {
		pr_err("Failed to allocate a TCE memory, order=%d\n", order);
		return NULL;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, allocated);
	*total_allocated += allocated;

	--levels;
	if (!levels) {
		*current_offset += allocated;
		return addr;
	}

	for (i = 0; i < entries; ++i) {
		tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
				levels, limit, current_offset, total_allocated);
		if (!tmp)
			break;

		addr[i] = cpu_to_be64(__pa(tmp) |
				TCE_PCI_READ | TCE_PCI_WRITE);

		if (*current_offset >= limit)
			break;
	}

	return addr;
}

2015-06-05 14:35:19 +08:00
|
|
|
static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
|
|
|
|
unsigned long size, unsigned level);
|
|
|
|
|

static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
		__u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table *tbl)
{
	void *addr;
	unsigned long offset = 0, level_shift, total_allocated = 0;
	const unsigned window_shift = ilog2(window_size);
	unsigned entries_shift = window_shift - page_shift;
	unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
	const unsigned long tce_table_size = 1UL << table_shift;

	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
		return -EINVAL;

	if ((window_size > memory_hotplug_max()) || !is_power_of_2(window_size))
		return -EINVAL;

	/* Adjust direct table size from window_size and levels */
	entries_shift = (entries_shift + levels - 1) / levels;
	level_shift = entries_shift + 3;
	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);

	/* Allocate TCE table */
	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
			levels, tce_table_size, &offset, &total_allocated);

	/* addr==NULL means that the first level allocation failed */
	if (!addr)
		return -ENOMEM;

	/*
	 * First level was allocated but some lower level failed because
	 * we did not allocate as much as we wanted, so release the
	 * partially allocated table.
	 */
	if (offset < tce_table_size) {
		pnv_pci_ioda2_table_do_free_pages(addr,
				1ULL << (level_shift - 3), levels - 1);
		return -ENOMEM;
	}

	/* Setup linux iommu table */
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
			page_shift);
	tbl->it_level_size = 1ULL << (level_shift - 3);
	tbl->it_indirect_levels = levels - 1;
	tbl->it_allocated_size = total_allocated;

	pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
			window_size, tce_table_size, bus_offset);

	return 0;
}
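
/*
 * Worked example (illustrative figures, not from the original source):
 * for a 4GB window (window_shift = 32) with 4K IOMMU pages
 * (page_shift = 12), entries_shift starts at 20. With levels = 2 it is
 * split into (20 + 2 - 1) / 2 = 10 bits per level, giving
 * level_shift = 13, which is then rounded up to PAGE_SHIFT (16 on a
 * 64K-page kernel), so each level ends up as a single 64KB chunk.
 */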

static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
		unsigned long size, unsigned level)
{
	const unsigned long addr_ul = (unsigned long) addr &
			~(TCE_PCI_READ | TCE_PCI_WRITE);

	if (level) {
		long i;
		u64 *tmp = (u64 *) addr_ul;

		for (i = 0; i < size; ++i) {
			unsigned long hpa = be64_to_cpu(tmp[i]);

			if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
				continue;

			pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
					level - 1);
		}
	}

	free_pages(addr_ul, get_order(size << 3));
}

static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
{
	const unsigned long size = tbl->it_indirect_levels ?
			tbl->it_level_size : tbl->it_size;

	if (!tbl->it_size)
		return;

	pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
			tbl->it_indirect_levels);
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	int64_t rc;

	if (!pnv_pci_ioda_pe_dma_weight(pe))
		return;

	/* TVE #1 is selected by PCI address bit 59 */
	pe->tce_bypass_base = 1ull << 59;

	iommu_register_group(&pe->table_group, phb->hose->global_number,
			pe->pe_number);

	/* The PE will reserve all possible 32-bit space */
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		phb->ioda.m32_pci_base);

	/* Setup linux iommu table */
	pe->table_group.tce32_start = 0;
	pe->table_group.tce32_size = phb->ioda.m32_pci_base;
	pe->table_group.max_dynamic_windows_supported =
			IOMMU_TABLE_GROUP_MAX_TABLES;
	pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
	pe->table_group.pgsizes = SZ_4K | SZ_64K | SZ_16M;
#ifdef CONFIG_IOMMU_API
	pe->table_group.ops = &pnv_pci_ioda2_ops;
#endif

	rc = pnv_pci_ioda2_setup_default_config(pe);
	if (rc)
		return;

	if (pe->flags & PNV_IODA_PE_DEV)
		iommu_add_device(&pe->pdev->dev);
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		pnv_ioda_setup_bus_dma(pe, pe->pbus);
}
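
/*
 * Illustrative note (assumed example): with tce_bypass_base = 1ull << 59,
 * a 64-bit capable device that sets PCI address bit 59 bypasses the TCE
 * table entirely; once the bypass TVE is enabled, bus address
 * 0x0800000000001000 maps straight to host physical address 0x1000.
 */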

#ifdef CONFIG_PCI_MSI
int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
{
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);

	return opal_pci_msi_eoi(phb->opal_id, hw_irq);
}

static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	int64_t rc;
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);

	rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}

void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
{
	struct irq_data *idata;
	struct irq_chip *ichip;

	/* The MSI EOI OPAL call is only needed on PHB3 */
	if (phb->model != PNV_PHB_MODEL_PHB3)
		return;

	if (!phb->ioda.irq_chip_init) {
		/*
		 * The first time we set up an MSI IRQ, we need to set up
		 * the corresponding IRQ chip so EOIs are routed correctly.
		 */
		idata = irq_get_irq_data(virq);
		ichip = irq_data_get_irq_chip(idata);
		phb->ioda.irq_chip_init = 1;
		phb->ioda.irq_chip = *ichip;
		phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
	}
	irq_set_chip(virq, &phb->ioda.irq_chip);
}
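
/*
 * Design note (not from the original source): the lazily-initialized
 * per-PHB irq_chip above is a copy of the underlying XICS chip with only
 * .irq_eoi overridden, so every other callback (mask, unmask, affinity)
 * still goes through the original chip unchanged.
 */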

/*
 * Returns true iff chip is something that we could call
 * pnv_opal_pci_msi_eoi for.
 */
bool is_pnv_opal_msi(struct irq_chip *chip)
{
	return chip->irq_eoi == pnv_ioda2_msi_eoi;
}
EXPORT_SYMBOL_GPL(is_pnv_opal_msi);

static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	unsigned int xive_num = hwirq - phb->msi_base;
	__be32 data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (dev->no_64bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		__be64 addr64;

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = be64_to_cpu(addr64) >> 32;
		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
	} else {
		__be32 addr32;

		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = be32_to_cpu(addr32);
	}
	msg->data = be32_to_cpu(data);

	pnv_set_msi_irq_chip(phb, virq);

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %x\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}
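
/*
 * Illustrative example (assumed numbers): with phb->msi_base = 0x800 and
 * hwirq = 0x805, xive_num is 5; OPAL returns the doorbell address/data
 * pair for that XIVE, which is written verbatim into the device's MSI
 * capability via *msg.
 */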

static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

#ifdef CONFIG_PCI_IOV
static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	const resource_size_t gate = phb->ioda.m64_segsize >> 2;
	struct resource *res;
	int i;
	resource_size_t size, total_vf_bar_sz;
	struct pci_dn *pdn;
	int mul, total_vfs;

	if (!pdev->is_physfn || pdev->is_added)
		return;

	pdn = pci_get_pdn(pdev);
	pdn->vfs_expanded = 0;
	pdn->m64_single_mode = false;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	mul = phb->ioda.total_pe_num;
	total_vf_bar_sz = 0;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;
		if (!pnv_pci_is_m64_flags(res->flags)) {
			dev_warn(&pdev->dev,
				 "Don't support SR-IOV with non-M64 VF BAR%d: %pR\n",
				 i, res);
			goto truncate_iov;
		}

		total_vf_bar_sz += pci_iov_resource_size(pdev,
				i + PCI_IOV_RESOURCES);

		/*
		 * If bigger than a quarter of the M64 segment size, just
		 * round up to a power of two.
		 *
		 * Generally, one M64 BAR maps one IOV BAR. To avoid conflicts
		 * with other devices, the IOV BAR size is expanded to be
		 * (total_pe * VF_BAR_size). When VF_BAR_size is half of the
		 * M64 segment size, the expanded size would equal half of the
		 * whole M64 space size, which would exhaust the M64 space and
		 * limit the system's flexibility. This is a design decision
		 * to set the boundary to a quarter of the M64 segment size.
		 */
		if (total_vf_bar_sz > gate) {
			mul = roundup_pow_of_two(total_vfs);
			dev_info(&pdev->dev,
				"VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n",
				total_vf_bar_sz, gate, mul);
			pdn->m64_single_mode = true;
			break;
		}
	}

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;

		size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
		/*
		 * On PHB3, the minimum size alignment of an M64 BAR in
		 * single mode is 32MB.
		 */
		if (pdn->m64_single_mode && (size < SZ_32M))
			goto truncate_iov;
		dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
		res->end = res->start + size * mul - 1;
		dev_dbg(&pdev->dev, "                       %pR\n", res);
		dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
			 i, res, mul);
	}
	pdn->vfs_expanded = mul;

	return;

truncate_iov:
	/* To save MMIO space, the IOV BAR is truncated. */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		res->flags = 0;
		res->end = res->start - 1;
	}
}
#endif /* CONFIG_PCI_IOV */
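
/*
 * Worked example for the gate above (illustrative, assumed sizes): with
 * m64_segsize = 256MB the gate is 64MB. A PF whose VF BARs total 128MB
 * exceeds the gate, so single PE mode is chosen and, for total_vfs = 60,
 * mul becomes roundup_pow_of_two(60) = 64 instead of total_pe_num.
 */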

static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
				  struct resource *res)
{
	struct pnv_phb *phb = pe->phb;
	struct pci_bus_region region;
	int index;
	int64_t rc;

	if (!res || !res->flags || res->start > res->end)
		return;

	if (res->flags & IORESOURCE_IO) {
		region.start = res->start - phb->ioda.io_pci_base;
		region.end = res->end - phb->ioda.io_pci_base;
		index = region.start / phb->ioda.io_segsize;

		while (index < phb->ioda.total_pe_num &&
		       region.start <= region.end) {
			phb->ioda.io_segmap[index] = pe->pe_number;
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
			if (rc != OPAL_SUCCESS) {
				pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
				       __func__, rc, index, pe->pe_number);
				break;
			}

			region.start += phb->ioda.io_segsize;
			index++;
		}
	} else if ((res->flags & IORESOURCE_MEM) &&
		   !pnv_pci_is_m64(phb, res)) {
		region.start = res->start -
			       phb->hose->mem_offset[0] -
			       phb->ioda.m32_pci_base;
		region.end = res->end -
			     phb->hose->mem_offset[0] -
			     phb->ioda.m32_pci_base;
		index = region.start / phb->ioda.m32_segsize;

		while (index < phb->ioda.total_pe_num &&
		       region.start <= region.end) {
			phb->ioda.m32_segmap[index] = pe->pe_number;
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
			if (rc != OPAL_SUCCESS) {
				pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
				       __func__, rc, index, pe->pe_number);
				break;
			}

			region.start += phb->ioda.m32_segsize;
			index++;
		}
	}
}
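
/*
 * Illustrative example (assumed sizes): with m32_segsize = 128MB, a
 * non-prefetchable window at bus offset 256MB..511MB covers M32 segments
 * 2 and 3, so both segmap slots are pointed at this PE and both segments
 * are mapped through OPAL above.
 */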

/*
 * This function is supposed to be called on the basis of PE from top
 * to bottom style. So the I/O or MMIO segment assigned to the
 * parent PE could be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pci_dev *pdev;
	int i;

	/*
	 * NOTE: We only care about PCI-bus based PEs for now. PCI-device
	 * based PEs, for example SR-IOV sensitive VFs, should be figured
	 * out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);

		/*
		 * If the PE contains all subordinate PCI buses, the
		 * windows of the child bridges should be mapped to
		 * the PE as well.
		 */
		if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
			continue;
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
			pnv_ioda_setup_pe_res(pe,
				&pdev->resource[PCI_BRIDGE_RESOURCES + i]);
	}
}

#ifdef CONFIG_DEBUG_FS
static int pnv_pci_diag_data_set(void *data, u64 val)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	s64 ret;

	if (val != 1ULL)
		return -EINVAL;

	hose = (struct pci_controller *)data;
	if (!hose || !hose->private_data)
		return -ENODEV;

	phb = hose->private_data;

	/* Retrieve the diag data from firmware */
	ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					  PNV_PCI_DIAG_BUF_SIZE);
	if (ret != OPAL_SUCCESS)
		return -EIO;

	/* Print the diag data to the kernel log */
	pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
			pnv_pci_diag_data_set, "%llu\n");

#endif /* CONFIG_DEBUG_FS */

static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* Notify initialization of PHB done */
		phb->initialized = 1;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs) {
			pr_warning("%s: Error on creating debugfs on PHB#%x\n",
				   __func__, hose->global_number);
			continue;
		}

		debugfs_create_file("dump_diag_regs", 0200, phb->dbgfs, hose,
				    &pnv_pci_diag_data_fops);
	}
#endif /* CONFIG_DEBUG_FS */
}
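
/*
 * Usage sketch (illustrative; the directory name depends on the PHB
 * number and on where powerpc_debugfs_root is mounted):
 *   echo 1 > /sys/kernel/debug/powerpc/PCI0000/dump_diag_regs
 * triggers pnv_pci_diag_data_set() and dumps the diag data to the log.
 */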

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_iommu_api();
	pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
	eeh_init();
	eeh_addr_cache_build();
#endif
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. For that case, we
 * needn't enlarge the alignment so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/*
	 * We fall back to M32 if M64 isn't supported. We enforce the M64
	 * alignment for any 64-bit resource, PCIe doesn't care and
	 * bridges only do 64-bit prefetchable anyway.
	 */
	if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
		return phb->ioda.m64_segsize;
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
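
/*
 * Illustrative example (assumed sizes): on a PHB with m64_segsize = 256MB
 * and m32_segsize = 128MB, a 64-bit prefetchable bridge window is aligned
 * to 256MB and a 32-bit MMIO window to 128MB, so each window starts on a
 * fresh PE segment.
 */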

/*
 * We are updating the root port or the upstream port of the
 * bridge behind the root port with the PHB's windows in order
 * to accommodate the changes on required resources during
 * PCI (slot) hotplug, which is connected to either the root
 * port or the downstream ports of the PCIe switch behind the
 * root port.
 */
static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
					   unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dev *bridge = bus->self;
	struct resource *r, *w;
	bool msi_region = false;
	int i;

	/* Check if we need to apply the fixup to the bridge's windows */
	if (!pci_is_root_bus(bridge->bus) &&
	    !pci_is_root_bus(bridge->bus->self->bus))
		return;

	/* Fixup the resources */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
		if (!r->flags || !r->parent)
			continue;

		w = NULL;
		if (r->flags & type & IORESOURCE_IO)
			w = &hose->io_resource;
		else if (pnv_pci_is_m64(phb, r) &&
			 (type & IORESOURCE_PREFETCH) &&
			 phb->ioda.m64_segsize)
			w = &hose->mem_resources[1];
		else if (r->flags & type & IORESOURCE_MEM) {
			w = &hose->mem_resources[0];
			msi_region = true;
		}

		r->start = w->start;
		r->end = w->end;

		/*
		 * The 64KB 32-bit MSI region shouldn't be included in
		 * the 32-bit bridge window. Otherwise, we can see strange
		 * issues. One of them is an EEH error observed on Garrison.
		 *
		 * Exclude the top 1MB region, which is the minimal
		 * alignment of the 32-bit bridge window.
		 */
		if (msi_region) {
			r->end += 0x10000;
			r->end -= 0x100000;
		}
	}
}

static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dev *bridge = bus->self;
	struct pnv_ioda_pe *pe;
	bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);

	/* Extend bridge's windows if necessary */
	pnv_pci_fixup_bridge_resources(bus, type);

	/* The PE for the root bus should be realized before any other */
	if (!phb->ioda.root_pe_populated) {
		pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
		if (pe) {
			phb->ioda.root_pe_idx = pe->pe_number;
			phb->ioda.root_pe_populated = true;
		}
	}

	/* Don't assign a PE to a PCI bus that has no subordinate devices */
	if (list_empty(&bus->devices))
		return;

	/* Reserve PEs according to used M64 resources */
	if (phb->reserve_m64_pe)
		phb->reserve_m64_pe(bus, NULL, all);

	/*
	 * Assign PE. We might run here because of partial hotplug.
	 * For that case, we just pick up the existing PE and should
	 * not allocate resources again.
	 */
	pe = pnv_ioda_setup_bus_PE(bus, all);
	if (!pe)
		return;

	pnv_ioda_setup_pe_seg(pe);
	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_setup_dma_pe(phb, pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_setup_dma_pe(phb, pe);
		break;
	default:
		pr_warn("%s: No DMA for PHB#%x (type %d)\n",
			__func__, phb->hose->global_number, phb->type);
	}
}

#ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
						      int resno)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	resource_size_t align;

	/*
	 * On the PowerNV platform, the IOV BAR is mapped by an M64 BAR to
	 * enable SR-IOV. From the hardware perspective, the range mapped
	 * by the M64 BAR must be size aligned.
	 *
	 * When the IOV BAR is mapped with an M64 BAR in Single PE mode,
	 * that extra powernv-specific hardware restriction is gone. But if
	 * we just used the VF BAR size as the alignment, the PF BAR and a
	 * VF BAR might be allocated within one segment of M64 #15, which
	 * introduces a PE conflict between the PF and the VF. Based on
	 * this, the minimum alignment of an IOV BAR is m64_segsize.
	 *
	 * This function returns the total IOV BAR size if the M64 BAR is
	 * in Shared PE mode, or just the VF BAR size if not.
	 * If the M64 BAR is in Single PE mode, it returns the VF BAR size
	 * or the M64 segment size if the IOV BAR size is smaller.
	 */
	align = pci_iov_resource_size(pdev, resno);
	if (!pdn->vfs_expanded)
		return align;
	if (pdn->m64_single_mode)
		return max(align, (resource_size_t)phb->ioda.m64_segsize);

	return pdn->vfs_expanded * align;
}
#endif /* CONFIG_PCI_IOV */
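
/*
 * Worked example (illustrative figures): in shared mode with
 * vfs_expanded = 256 and an 8MB VF BAR, the alignment returned above is
 * 256 * 8MB = 2GB; in single mode it is max(8MB, m64_segsize) instead.
 */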

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet. For example, during resource
	 * reassignment in the PCI probe period. We just skip
	 * the check if the PEs aren't ready.
	 */
	if (!phb->initialized)
		return true;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return false;

	return true;
}

static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
				       int num)
{
	struct pnv_ioda_pe *pe = container_of(table_group,
					      struct pnv_ioda_pe, table_group);
	struct pnv_phb *phb = pe->phb;
	unsigned int idx;
	long rc;

	pe_info(pe, "Removing DMA window #%d\n", num);
	for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
		if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
			continue;

		rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
						idx, 0, 0ul, 0ul, 0ul);
		if (rc != OPAL_SUCCESS) {
			pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
				rc, idx);
			return rc;
		}

		phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
	}

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
	return OPAL_SUCCESS;
}

static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
{
	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
	struct iommu_table *tbl = pe->table_group.tables[0];
	int64_t rc;

	if (!weight)
		return;

	rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
	if (rc != OPAL_SUCCESS)
		return;

	pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	free_pages(tbl->it_base, get_order(tbl->it_size << 3));
	iommu_free_table(tbl, "pnv");
}

static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = pe->table_group.tables[0];
	unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
#ifdef CONFIG_IOMMU_API
	int64_t rc;
#endif

	if (!weight)
		return;

#ifdef CONFIG_IOMMU_API
	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
#endif

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	pnv_pci_ioda2_table_free_pages(tbl);
	iommu_free_table(tbl, "pnv");
}

static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
				 unsigned short win,
				 unsigned int *map)
{
	struct pnv_phb *phb = pe->phb;
	int idx;
	int64_t rc;

	for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
		if (map[idx] != pe->pe_number)
			continue;

		if (win == OPAL_M64_WINDOW_TYPE)
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					phb->ioda.reserved_pe_idx, win,
					idx / PNV_IODA1_M64_SEGS,
					idx % PNV_IODA1_M64_SEGS);
		else
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					phb->ioda.reserved_pe_idx, win, 0, idx);

		if (rc != OPAL_SUCCESS)
			pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n",
				rc, win, idx);

		map[idx] = IODA_INVALID_PE;
	}
}
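
/*
 * Illustrative note: for IODA1 M64 the flat segment index above is split
 * into idx / PNV_IODA1_M64_SEGS (which M64 BAR) and
 * idx % PNV_IODA1_M64_SEGS (which segment within that BAR), so idx = 19
 * means segment 3 of M64 BAR 2.
 */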

static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1) {
		pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
				     phb->ioda.io_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
		pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
				     phb->ioda.m64_segmap);
	} else if (phb->type == PNV_PHB_IODA2) {
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
	}
}

static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	struct pnv_ioda_pe *slave, *tmp;

	list_del(&pe->list);
	switch (phb->type) {
	case PNV_PHB_IODA1:
		pnv_pci_ioda1_release_pe_dma(pe);
		break;
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_release_pe_dma(pe);
		break;
	default:
		WARN_ON(1);
	}

	pnv_ioda_release_pe_seg(pe);
	pnv_ioda_deconfigure_pe(pe->phb, pe);

	/* Release slave PEs in the compound PE */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
			list_del(&slave->list);
			pnv_ioda_free_pe(slave);
		}
	}

	/*
	 * The PE for the root bus can be removed because of hotplug in EEH
	 * recovery for a fenced PHB error. We need to mark the PE dead so
	 * that it can be populated again in the PCI hot add path. The PE
	 * shouldn't be destroyed as it's a global reserved resource.
	 */
	if (phb->ioda.root_pe_populated &&
	    phb->ioda.root_pe_idx == pe->pe_number)
		phb->ioda.root_pe_populated = false;
	else
		pnv_ioda_free_pe(pe);
}

static void pnv_pci_release_device(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	if (pdev->is_virtfn)
		return;

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	/*
	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
	 * isn't removed and added afterwards in this scenario. We should
	 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
	 * device count is decreased on removing devices while failing to
	 * be increased on adding devices. That leads to an unbalanced PE
	 * device count and eventually breaks the normal PCI hotplug path.
	 */
	pe = &phb->ioda.pe_array[pdn->pe_number];
	pdn->pe_number = IODA_INVALID_PE;

	WARN_ON(--pe->device_count < 0);
	if (pe->device_count == 0)
		pnv_ioda_release_pe(pe);
}

static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
		       OPAL_ASSERT_RESET);
}

static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_dma_dev_setup,
	.dma_bus_setup		= pnv_pci_dma_bus_setup,
#ifdef CONFIG_PCI_MSI
	.setup_msi_irqs		= pnv_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
#endif
	.enable_device_hook	= pnv_pci_enable_device_hook,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.setup_bridge		= pnv_pci_setup_bridge,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.dma_set_mask		= pnv_pci_ioda_dma_set_mask,
	.dma_get_required_mask	= pnv_pci_ioda_dma_get_required_mask,
	.shutdown		= pnv_pci_ioda_shutdown,
};

static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
{
	dev_err_once(&npdev->dev,
			"%s operation unsupported for NVLink devices\n",
			__func__);
	return -EPERM;
}

static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_dma_dev_setup,
#ifdef CONFIG_PCI_MSI
	.setup_msi_irqs		= pnv_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
#endif
	.enable_device_hook	= pnv_pci_enable_device_hook,
	.window_alignment	= pnv_pci_window_alignment,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.dma_set_mask		= pnv_npu_dma_set_mask,
	.shutdown		= pnv_pci_ioda_shutdown,
};

#ifdef CONFIG_CXL_BASE
const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_dma_dev_setup,
	.dma_bus_setup		= pnv_pci_dma_bus_setup,
#ifdef CONFIG_PCI_MSI
	.setup_msi_irqs		= pnv_cxl_cx4_setup_msi_irqs,
	.teardown_msi_irqs	= pnv_cxl_cx4_teardown_msi_irqs,
#endif
	.enable_device_hook	= pnv_cxl_enable_device_hook,
	.disable_device		= pnv_cxl_disable_device,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.setup_bridge		= pnv_pci_setup_bridge,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.dma_set_mask		= pnv_pci_ioda_dma_set_mask,
	.dma_get_required_mask	= pnv_pci_ioda_dma_get_required_mask,
	.shutdown		= pnv_pci_ioda_shutdown,
};
#endif

static void __init pnv_pci_init_ioda_phb(struct device_node *np,
					 u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m64map_off, m32map_off, pemap_off;
	unsigned long iomap_off = 0, dma32map_off = 0;
	struct resource r;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	unsigned int segno;
	u64 phb_id;
	void *aux;
	long rc;

	if (!of_device_is_available(np))
		return;

	pr_info("Initializing %s PHB (%s)\n",
		pnv_phb_names[ioda_type], of_node_full_name(np));

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug(" PHB-ID : 0x%016llx\n", phb_id);

	phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);

	/* Allocate PCI controller */
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err(" Can't allocate PCI controller for %s\n",
		       np->full_name);
		memblock_free(__pa(phb), sizeof(struct pnv_phb));
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn(" Broken <bus-range> on %s\n", np->full_name);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;
	mutex_init(&phb->ioda.pe_alloc_mutex);

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
		phb->model = PNV_PHB_MODEL_NPU;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	if (!of_address_to_resource(np, 0, &r)) {
		phb->regs_phys = r.start;
		phb->regs = ioremap(r.start, resource_size(&r));
		if (phb->regs == NULL)
			pr_err(" Failed to map registers !\n");
	}

	/* Initialize more IODA stuff */
	phb->ioda.total_pe_num = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe_num = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);

	/* Invalidate RID to PE# mapping */
	for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
		phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;

	/* Parse 64-bit MMIO range */
	pnv_ioda_parse_m64_window(phb);

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already taken off the top 64K of M32 space (MSI space) */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.dma32_count = phb->ioda.m32_pci_base /
				PNV_IODA1_DMA32_SEGSIZE;

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
			sizeof(unsigned long));
	m64map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
	m32map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
		dma32map_off = size;
		size += phb->ioda.dma32_count *
			sizeof(phb->ioda.dma32_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
	aux = memblock_virt_alloc(size, 0);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m64_segmap = aux + m64map_off;
	phb->ioda.m32_segmap = aux + m32map_off;
	for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
		phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
		phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
	}
	if (phb->type == PNV_PHB_IODA1) {
		phb->ioda.io_segmap = aux + iomap_off;
		for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
			phb->ioda.io_segmap[segno] = IODA_INVALID_PE;

		phb->ioda.dma32_segmap = aux + dma32map_off;
		for (segno = 0; segno < phb->ioda.dma32_count; segno++)
			phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
	}
	phb->ioda.pe_array = aux + pemap_off;

	/*
	 * Choose a PE number for the root bus, which shouldn't have
	 * M64 resources consumed by its child devices. We pick the PE
	 * number adjacent to the reserved one if possible.
	 */
	pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
	if (phb->ioda.reserved_pe_idx == 0) {
		phb->ioda.root_pe_idx = 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
		phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else {
		phb->ioda.root_pe_idx = IODA_INVALID_PE;
	}

	INIT_LIST_HEAD(&phb->ioda.pe_list);
	mutex_init(&phb->ioda.pe_list_mutex);

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
		phb->ioda.m32_size, phb->ioda.m32_segsize);
	if (phb->ioda.m64_size)
		pr_info("             M64: 0x%lx [segment=0x%lx]\n",
			phb->ioda.m64_size, phb->ioda.m64_segsize);
	if (phb->ioda.io_size)
		pr_info("              IO: 0x%x [segment=0x%x]\n",
			phb->ioda.io_size, phb->ioda.io_segsize);


	phb->hose->ops = &pnv_pci_ops;
	phb->get_pe_state = pnv_ioda_get_pe_state;
	phb->freeze_pe = pnv_ioda_freeze_pe;
	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. It's supposed
	 * that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge BARs so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;

	if (phb->type == PNV_PHB_NPU) {
		hose->controller_ops = pnv_npu_ioda_controller_ops;
	} else {
		phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
		hose->controller_ops = pnv_pci_ioda_controller_ops;
	}

#ifdef CONFIG_PCI_IOV
	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
#endif

	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * If we're running in a kdump kernel, the previous kernel never
	 * shut down PCI devices correctly. We already got the IODA table
	 * cleaned out. So we have to issue a PHB reset to stop all PCI
	 * transactions from the previous kernel.
	 */
	if (is_kdump_kernel()) {
		pr_info(" Issue PHB reset ...\n");
		pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
	}

	/* Remove M64 resource if we can't configure it successfully */
	if (!phb->init_m64 || phb->init_m64(phb))
		hose->mem_resources[1].flags = 0;
}
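
/*
 * Illustrative note (assumed figures): the single aux allocation above
 * packs the PE allocation bitmap, the m64/m32 (and, on IODA1, io/dma32)
 * segment maps and the PE array back to back; with total_pe_num = 256
 * the bitmap is only 32 bytes and the PE array dominates the allocation.
 */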

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_npu_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}