
Merge tag 'numascale' into x86/platform

This patchset adds support for federated systems where multiple memory
controllers can exist and see each other across multiple PCI domains. This
means that AMD node IDs can now exceed 8, and the code handling them is
taught to incorporate the PCI domain into those IDs.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
H. Peter Anvin, 2013-01-22 08:37:34 -08:00
commit d29a4a5fe8
5 changed files with 80 additions and 64 deletions
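
The heart of the change is the lookup rule added to amd_nb.h below: a device's node id becomes the index of the northbridge whose misc function shares the device's PCI domain and slot, instead of being derived from PCI_SLOT() - 0x18 alone. A minimal user-space sketch of that matching rule (the struct, table, and values here are mocked for illustration, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for struct pci_dev and the pci_domain_nr()/PCI_SLOT() helpers. */
struct fake_pci_dev {
	int domain;	/* PCI segment/domain */
	int slot;	/* device number on the bus */
};

/* Hypothetical table playing the role of amd_northbridges.nb[i].misc. */
static const struct fake_pci_dev nb_misc[] = {
	{ .domain = 0, .slot = 0x18 },
	{ .domain = 0, .slot = 0x19 },
	{ .domain = 1, .slot = 0x18 },	/* a node behind a second PCI domain */
};

/* Same rule as the new amd_get_node_id(): match on domain AND slot. */
static uint16_t get_node_id(const struct fake_pci_dev *pdev)
{
	unsigned int i;

	for (i = 0; i < sizeof(nb_misc) / sizeof(nb_misc[0]); i++)
		if (nb_misc[i].domain == pdev->domain &&
		    nb_misc[i].slot == pdev->slot)
			return i;

	return 0;	/* the kernel version WARNs before returning 0 */
}

int main(void)
{
	struct fake_pci_dev dev = { .domain = 1, .slot = 0x18 };

	printf("node id = %u\n", (unsigned)get_node_id(&dev));	/* prints 2 */
	return 0;
}

With a second PCI domain in play, two devices in slot 0x18 resolve to different node ids, which is what lets node counts grow past the old single-domain limit and why the id type widens from u8 to u16 throughout the diff.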

arch/x86/include/asm/amd_nb.h

@@ -81,6 +81,23 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
 	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 }
 
+static inline u16 amd_get_node_id(struct pci_dev *pdev)
+{
+	struct pci_dev *misc;
+	int i;
+
+	for (i = 0; i != amd_nb_num(); i++) {
+		misc = node_to_amd_nb(i)->misc;
+
+		if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
+		    PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
+			return i;
+	}
+
+	WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
+	return 0;
+}
+
 #else
 
 #define amd_nb_num(x)	0

arch/x86/include/asm/processor.h

@@ -943,7 +943,7 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 extern int get_tsc_mode(unsigned long adr);
 extern int set_tsc_mode(unsigned int val);
 
-extern int amd_get_nb_id(int cpu);
+extern u16 amd_get_nb_id(int cpu);
 
 struct aperfmperf {
 	u64 aperf, mperf;

arch/x86/kernel/cpu/amd.c

@@ -364,9 +364,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 }
 
-int amd_get_nb_id(int cpu)
+u16 amd_get_nb_id(int cpu)
 {
-	int id = 0;
+	u16 id = 0;
 #ifdef CONFIG_SMP
 	id = per_cpu(cpu_llc_id, cpu);
 #endif

drivers/edac/amd64_edac.c

@@ -31,7 +31,7 @@ static struct ecc_settings **ecc_stngs;
  *
  *FIXME: Produce a better mapping/linearisation.
  */
-struct scrubrate {
+static const struct scrubrate {
 	u32 scrubval;		/* bit pattern for scrub rate */
 	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
 } scrubrates[] = {
@@ -239,7 +239,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
  * DRAM base/limit associated with node_id
  */
 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
-				   unsigned nid)
+				   u8 nid)
 {
 	u64 addr;
@@ -265,7 +265,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 						u64 sys_addr)
 {
 	struct amd64_pvt *pvt;
-	unsigned node_id;
+	u8 node_id;
 	u32 intlv_en, bits;
 
 	/*
@@ -939,7 +939,8 @@ static u64 get_error_address(struct mce *m)
 		struct amd64_pvt *pvt;
 		u64 cc6_base, tmp_addr;
 		u32 tmp;
-		u8 mce_nid, intlv_en;
+		u16 mce_nid;
+		u8 intlv_en;
 
 		if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
 			return addr;
@@ -979,10 +980,29 @@ static u64 get_error_address(struct mce *m)
 	return addr;
 }
 
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+						unsigned int device,
+						struct pci_dev *related)
+{
+	struct pci_dev *dev = NULL;
+
+	while ((dev = pci_get_device(vendor, device, dev))) {
+		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
+		    (dev->bus->number == related->bus->number) &&
+		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+			break;
+	}
+
+	return dev;
+}
+
 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 {
+	struct amd_northbridge *nb;
+	struct pci_dev *misc, *f1 = NULL;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 	int off = range << 3;
+	u32 llim;
 
 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
@@ -996,30 +1016,32 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
 
-	/* Factor in CC6 save area by reading dst node's limit reg */
-	if (c->x86 == 0x15) {
-		struct pci_dev *f1 = NULL;
-		u8 nid = dram_dst_node(pvt, range);
-		u32 llim;
-
-		f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
-		if (WARN_ON(!f1))
-			return;
-
-		amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
-
-		pvt->ranges[range].lim.lo &= GENMASK(0, 15);
-
-		/* {[39:27],111b} */
-		pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
-
-		pvt->ranges[range].lim.hi &= GENMASK(0, 7);
-
-		/* [47:40] */
-		pvt->ranges[range].lim.hi |= llim >> 13;
-
-		pci_dev_put(f1);
-	}
+	/* F15h: factor in CC6 save area by reading dst node's limit reg */
+	if (c->x86 != 0x15)
+		return;
+
+	nb = node_to_amd_nb(dram_dst_node(pvt, range));
+	if (WARN_ON(!nb))
+		return;
+
+	misc = nb->misc;
+	f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+	if (WARN_ON(!f1))
+		return;
+
+	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
+
+	pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+
+	/* {[39:27],111b} */
+	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
+
+	pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+
+	/* [47:40] */
+	pvt->ranges[range].lim.hi |= llim >> 13;
+
+	pci_dev_put(f1);
 }
 
 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
@@ -1305,7 +1327,7 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
 }
 
 /* Convert the sys_addr to the normalized DCT address */
-static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
 				 u64 sys_addr, bool hi_rng,
 				 u32 dct_sel_base_addr)
 {
@@ -1381,7 +1403,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
  * -EINVAL: NOT FOUND
  * 0..csrow = Chip-Select Row
  */
-static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
@@ -1672,23 +1694,6 @@ static struct amd64_family_type amd64_family_types[] = {
 	},
 };
 
-static struct pci_dev *pci_get_related_function(unsigned int vendor,
-						unsigned int device,
-						struct pci_dev *related)
-{
-	struct pci_dev *dev = NULL;
-
-	dev = pci_get_device(vendor, device, dev);
-	while (dev) {
-		if ((dev->bus->number == related->bus->number) &&
-		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
-			break;
-		dev = pci_get_device(vendor, device, dev);
-	}
-	return dev;
-}
-
 /*
  * These are tables of eigenvectors (one per line) which can be used for the
  * construction of the syndrome tables. The modified syndrome search algorithm
@@ -1696,7 +1701,7 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
  *
  * Algorithm courtesy of Ross LaFetra from AMD.
  */
-static u16 x4_vectors[] = {
+static const u16 x4_vectors[] = {
 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
 	0x0001, 0x0002, 0x0004, 0x0008,
@@ -1735,7 +1740,7 @@ static u16 x4_vectors[] = {
 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
 };
 
-static u16 x8_vectors[] = {
+static const u16 x8_vectors[] = {
 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
@@ -1757,7 +1762,7 @@ static u16 x8_vectors[] = {
 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
 };
 
-static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
 			   unsigned v_dim)
 {
 	unsigned int i, err_sym;
@@ -2181,7 +2186,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 }
 
 /* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
 {
 	int cpu;
@@ -2191,7 +2196,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
 }
 
 /* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
+static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
 {
 	cpumask_var_t mask;
 	int cpu, nbe;
@@ -2224,7 +2229,7 @@ out:
 	return ret;
 }
 
-static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
 {
 	cpumask_var_t cmask;
 	int cpu;
@@ -2262,7 +2267,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
 	return 0;
 }
 
-static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
 				       struct pci_dev *F3)
 {
 	bool ret = true;
@@ -2314,7 +2319,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
 	return ret;
 }
 
-static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
 					struct pci_dev *F3)
 {
 	u32 value, mask = 0x3;	/* UECC/CECC enable */
@@ -2353,7 +2358,7 @@ static const char *ecc_msg =
 	"'ecc_enable_override'.\n"
 	" (Note that use of the override may cause unknown side effects.)\n";
 
-static bool ecc_enabled(struct pci_dev *F3, u8 nid)
+static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 {
 	u32 value;
 	u8 ecc_en = 0;
@@ -2474,7 +2479,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
 	struct mem_ctl_info *mci = NULL;
 	struct edac_mc_layer layers[2];
 	int err = 0, ret;
-	u8 nid = get_node_id(F2);
+	u16 nid = amd_get_node_id(F2);
 
 	ret = -ENOMEM;
 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2566,7 +2571,7 @@ err_ret:
 static int amd64_probe_one_instance(struct pci_dev *pdev,
 				    const struct pci_device_id *mc_type)
 {
-	u8 nid = get_node_id(pdev);
+	u16 nid = amd_get_node_id(pdev);
 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
 	struct ecc_settings *s;
 	int ret = 0;
@@ -2616,7 +2621,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
-	u8 nid = get_node_id(pdev);
+	u16 nid = amd_get_node_id(pdev);
 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
 	struct ecc_settings *s = ecc_stngs[nid];
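
The {[39:27],111b} packing in read_dram_base_limit_regs() above splits the 21 limit bits [47:27] read from DRAM_LOCAL_NODE_LIM across lim.lo[31:16] and lim.hi[7:0], with 111b supplying limit bits [26:24]. A small user-space round-trip sketch (the llim value is made up; GENMASK() copies the (lo, hi) argument order of amd64_edac.h's own macro, and the decode mirrors get_dram_limit() from the header diff below):

#include <stdint.h>
#include <stdio.h>

/* amd64_edac.h convention at the time: bits lo..hi, inclusive. */
#define GENMASK(lo, hi)	(((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	uint32_t llim = 0x1fffff;	/* made-up register value: limit[47:27] all ones */
	uint32_t lim_lo = 0, lim_hi = 0;

	/* Pack exactly as the hunk above does. */
	lim_lo &= GENMASK(0, 15);			/* keep the non-address bits */
	lim_lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;	/* {[39:27],111b} -> [31:16] */

	lim_hi &= GENMASK(0, 7);
	lim_hi |= llim >> 13;				/* [47:40] -> [7:0] */

	/* Decode the way get_dram_limit() does. */
	uint64_t lim = (((uint64_t)lim_lo & 0xffff0000) << 8) | 0x00ffffff;
	lim |= ((uint64_t)lim_hi & 0x000000ff) << 40;

	printf("limit = 0x%llx\n", (unsigned long long)lim);	/* 0xffffffffffff */
	return 0;
}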

drivers/edac/amd64_edac.h

@@ -292,12 +292,6 @@
 /* MSRs */
 #define MSR_MCGCTL_NBE			BIT(4)
 
-/* AMD sets the first MC device at device ID 0x18. */
-static inline u8 get_node_id(struct pci_dev *pdev)
-{
-	return PCI_SLOT(pdev->devfn) - 0x18;
-}
-
 enum amd_families {
 	K8_CPUS = 0,
 	F10_CPUS,
@@ -340,7 +334,7 @@ struct amd64_pvt {
 	/* pci_device handles which we utilize */
 	struct pci_dev *F1, *F2, *F3;
 
-	unsigned mc_node_id;	/* MC index of this MC node */
+	u16 mc_node_id;		/* MC index of this MC node */
 	int ext_model;		/* extended model value of this node */
 	int channel_count;
@@ -393,7 +387,7 @@ struct err_info {
 	u32 offset;
 };
 
-static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
 {
 	u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
@@ -403,7 +397,7 @@ static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
 	return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
 }
 
-static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
 {
 	u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;