parisc: Replace NR_CPUS in parisc code
parisc: Replace most arrays sized by NR_CPUS with percpu variables.

Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
parent 7f2347a44d
commit ef017bebd0
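Every hunk below applies the same conversion: an array indexed by CPU number (such as cpu_data[NR_CPUS] or cpu_devices[NR_CPUS]) becomes a per-cpu variable, declared with DECLARE_PER_CPU() in the header, defined once with DEFINE_PER_CPU() in a C file, and read with per_cpu(var, cpu) instead of var[cpu]. An NR_CPUS array is sized for the compile-time maximum and can cause false sharing between CPUs, while per-cpu copies are instantiated per possible CPU and grouped with the rest of that CPU's per-cpu data. A minimal sketch of the pattern follows; the foo_info structure and the two helper functions are hypothetical and not part of this commit:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    /* Hypothetical per-cpu state -- illustrative only, not from this patch. */
    struct foo_info {
            unsigned long counter;
    };

    /*
     * Before: "struct foo_info foo_info[NR_CPUS];"
     * After:  one copy per possible CPU.  A header would carry
     * "DECLARE_PER_CPU(struct foo_info, foo_info);" and the C file defines it:
     */
    DEFINE_PER_CPU(struct foo_info, foo_info);

    static void foo_reset_all(void)
    {
            int cpu;

            /* per_cpu(var, cpu) replaces var[cpu] when touching another CPU's copy. */
            for_each_online_cpu(cpu)
                    per_cpu(foo_info, cpu).counter = 0;
    }

    static void foo_count_local(void)
    {
            /* get_cpu_var() selects the local CPU's copy with preemption disabled. */
            get_cpu_var(foo_info).counter++;
            put_cpu_var(foo_info);
    }

The get_cpu_var()/put_cpu_var() pair is only needed where preemption could move the task between CPUs; the interrupt and boot paths touched by this commit already run with a stable CPU number, which is why they use plain per_cpu(cpu_data, cpu) together with smp_processor_id().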
@@ -17,6 +17,7 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/system.h>
+#include <asm/percpu.h>
 #endif /* __ASSEMBLY__ */
 
 #define KERNEL_STACK_SIZE (4*PAGE_SIZE)
@@ -109,8 +110,7 @@ struct cpuinfo_parisc {
 };
 
 extern struct system_cpuinfo_parisc boot_cpu_data;
-extern struct cpuinfo_parisc cpu_data[NR_CPUS];
-#define current_cpu_data cpu_data[smp_processor_id()]
+DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
 #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
 

@@ -298,7 +298,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 #endif
 
-	return cpu_data[cpu].txn_addr;
+	return per_cpu(cpu_data, cpu).txn_addr;
 }
 
 
@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
 	next_cpu++; /* assign to "next" CPU we want this bugger on */
 
 	/* validate entry */
-	while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
-		!cpu_online(next_cpu)))
+	while ((next_cpu < NR_CPUS) &&
+		(!per_cpu(cpu_data, next_cpu).txn_addr ||
+		 !cpu_online(next_cpu)))
 		next_cpu++;
 
 	if (next_cpu >= NR_CPUS)
@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
 			irq, smp_processor_id(), cpu);
 		gsc_writel(irq + CPU_IRQ_BASE,
-			cpu_data[cpu].hpa);
+			per_cpu(cpu_data, cpu).hpa);
 		goto set_out;
 	}
 #endif

@@ -541,9 +541,9 @@ static int __init perf_init(void)
 	spin_lock_init(&perf_lock);
 
 	/* TODO: this only lets us access the first cpu.. what to do for SMP? */
-	cpu_device = cpu_data[0].dev;
+	cpu_device = per_cpu(cpu_data, 0).dev;
 	printk("Performance monitoring counters enabled for %s\n",
-		cpu_data[0].dev->name);
+		per_cpu(cpu_data, 0).dev->name);
 
 	return 0;
 }

@@ -3,7 +3,7 @@
  * Initial setup-routines for HP 9000 based hardware.
  *
  * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
  * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
  * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
  * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
@@ -46,7 +46,7 @@
 struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
 EXPORT_SYMBOL(boot_cpu_data);
 
-struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
+DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
 extern int update_cr16_clocksource(void); /* from time.c */
 
@@ -68,6 +68,23 @@ extern int update_cr16_clocksource(void); /* from time.c */
 ** The initialization of OS data structures is the same (done below).
 */
 
+/**
+ * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static void __cpuinit
+init_percpu_prof(unsigned long cpunum)
+{
+	struct cpuinfo_parisc *p;
+
+	p = &per_cpu(cpu_data, cpunum);
+	p->prof_counter = 1;
+	p->prof_multiplier = 1;
+}
+
+
 /**
  * processor_probe - Determine if processor driver should claim this device.
  * @dev: The device which has been found.
@@ -147,7 +164,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
 	}
 #endif
 
-	p = &cpu_data[cpuid];
+	p = &per_cpu(cpu_data, cpuid);
 	boot_cpu_data.cpu_count++;
 
 	/* initialize counters - CPU 0 gets it_value set in time_init() */
@@ -162,12 +179,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
 #ifdef CONFIG_SMP
 	/*
 	** FIXME: review if any other initialization is clobbered
 	** for boot_cpu by the above memset().
 	*/
-
-	/* stolen from init_percpu_prof() */
-	cpu_data[cpuid].prof_counter = 1;
-	cpu_data[cpuid].prof_multiplier = 1;
+	init_percpu_prof(cpuid);
 #endif
 
 	/*
@@ -261,19 +275,6 @@ void __init collect_boot_cpu_data(void)
 }
 
 
-/**
- * init_cpu_profiler - enable/setup per cpu profiling hooks.
- * @cpunum: The processor instance.
- *
- * FIXME: doesn't do much yet...
- */
-static inline void __init
-init_percpu_prof(int cpunum)
-{
-	cpu_data[cpunum].prof_counter = 1;
-	cpu_data[cpunum].prof_multiplier = 1;
-}
-
 
 /**
  * init_per_cpu - Handle individual processor initializations.
@@ -293,7 +294,7 @@ init_percpu_prof(int cpunum)
  *
  * o Enable CPU profiling hooks.
  */
-int __init init_per_cpu(int cpunum)
+int __cpuinit init_per_cpu(int cpunum)
 {
 	int ret;
 	struct pdc_coproc_cfg coproc_cfg;
@@ -307,8 +308,8 @@ int __init init_per_cpu(int cpunum)
 		/* FWIW, FP rev/model is a more accurate way to determine
 		** CPU type. CPU rev/model has some ambiguous cases.
 		*/
-		cpu_data[cpunum].fp_rev = coproc_cfg.revision;
-		cpu_data[cpunum].fp_model = coproc_cfg.model;
+		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
 		printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
 			cpunum, coproc_cfg.revision, coproc_cfg.model);
@@ -344,16 +345,17 @@ int __init init_per_cpu(int cpunum)
 int
 show_cpuinfo (struct seq_file *m, void *v)
 {
-	int n;
+	unsigned long cpu;
 
-	for(n=0; n<boot_cpu_data.cpu_count; n++) {
+	for_each_online_cpu(cpu) {
+		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 #ifdef CONFIG_SMP
-		if (0 == cpu_data[n].hpa)
+		if (0 == cpuinfo->hpa)
 			continue;
 #endif
-		seq_printf(m, "processor\t: %d\n"
+		seq_printf(m, "processor\t: %lu\n"
 				"cpu family\t: PA-RISC %s\n",
-				n, boot_cpu_data.family_name);
+				cpu, boot_cpu_data.family_name);
 
 		seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
 
@@ -365,8 +367,8 @@ show_cpuinfo (struct seq_file *m, void *v)
 		seq_printf(m, "model\t\t: %s\n"
 				"model name\t: %s\n",
 			 boot_cpu_data.pdc.sys_model_name,
-			 cpu_data[n].dev ?
-				cpu_data[n].dev->name : "Unknown" );
+			 cpuinfo->dev ?
+				cpuinfo->dev->name : "Unknown");
 
 		seq_printf(m, "hversion\t: 0x%08x\n"
 				"sversion\t: 0x%08x\n",
@@ -377,8 +379,8 @@ show_cpuinfo (struct seq_file *m, void *v)
 		show_cache_info(m);
 
 		seq_printf(m, "bogomips\t: %lu.%02lu\n",
-			 cpu_data[n].loops_per_jiffy / (500000 / HZ),
-			 (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
+			 cpuinfo->loops_per_jiffy / (500000 / HZ),
+			 (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
 
 		seq_printf(m, "software id\t: %ld\n\n",
 			boot_cpu_data.pdc.model.sw_id);

@@ -316,7 +316,7 @@ static int __init parisc_init(void)
 
 	processor_init();
 	printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
-			boot_cpu_data.cpu_count,
+			num_present_cpus(),
 			boot_cpu_data.cpu_name,
 			boot_cpu_data.cpu_hz / 1000000,
 			boot_cpu_data.cpu_hz % 1000000 );
@@ -382,8 +382,8 @@ void start_parisc(void)
 	if (ret >= 0 && coproc_cfg.ccr_functional) {
 		mtctl(coproc_cfg.ccr_functional, 10);
 
-		cpu_data[cpunum].fp_rev = coproc_cfg.revision;
-		cpu_data[cpunum].fp_model = coproc_cfg.model;
+		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
 		asm volatile ("fstd %fr0,8(%sp)");
 	} else {

@@ -56,16 +56,17 @@ static int smp_debug_lvl = 0;
 		if (lvl >= smp_debug_lvl) \
 			printk(printargs);
 #else
-#define smp_debug(lvl, ...)
+#define smp_debug(lvl, ...)	do { } while(0)
 #endif /* DEBUG_SMP */
 
 DEFINE_SPINLOCK(smp_lock);
 
 volatile struct task_struct *smp_init_current_idle_task;
 
-static volatile int cpu_now_booting __read_mostly = 0;	/* track which CPU is booting */
+/* track which CPU is booting */
+static volatile int cpu_now_booting __cpuinitdata;
 
-static int parisc_max_cpus __read_mostly = 1;
+static int parisc_max_cpus __cpuinitdata = 1;
 
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
@@ -123,7 +124,7 @@ irqreturn_t
 ipi_interrupt(int irq, void *dev_id)
 {
 	int this_cpu = smp_processor_id();
-	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
+	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
 	unsigned long ops;
 	unsigned long flags;
 
@@ -202,13 +203,13 @@ ipi_interrupt(int irq, void *dev_id)
 static inline void
 ipi_send(int cpu, enum ipi_message_type op)
 {
-	struct cpuinfo_parisc *p = &cpu_data[cpu];
+	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
 	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
 	unsigned long flags;
 
 	spin_lock_irqsave(lock, flags);
 	p->pending_ipi |= 1 << op;
-	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
+	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
 	spin_unlock_irqrestore(lock, flags);
 }
 
@@ -341,6 +342,7 @@ void __init smp_callin(void)
  */
 int __cpuinit smp_boot_one_cpu(int cpuid)
 {
+	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
 	struct task_struct *idle;
 	long timeout;
 
@@ -372,7 +374,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
 	smp_init_current_idle_task = idle ;
 	mb();
 
-	printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
+	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
 
 	/*
 	** This gets PDC to release the CPU from a very tight loop.
@@ -383,7 +385,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
 	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
 	** contents of memory are valid."
 	*/
-	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
+	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
 	mb();
 
 	/*
@@ -415,12 +417,12 @@ alive:
 	return 0;
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
-	int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */
+	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
 
 	/* Setup BSP mappings */
-	printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
 
 	cpu_set(bootstrap_processor, cpu_online_map);
 	cpu_set(bootstrap_processor, cpu_present_map);

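One small cleanup rides along in the hunk above that touches smp_debug(): when DEBUG_SMP is off, the stub now expands to do { } while(0) instead of nothing, so a call still produces a single, complete statement, requires its trailing semicolon, and avoids empty-body warnings. A stand-alone, user-space illustration of the idiom; the dbg() macro here is hypothetical and not from the patch:

    #include <stdio.h>

    #ifdef DEBUG
    #define dbg(fmt, ...)	printf(fmt, ##__VA_ARGS__)
    #else
    /* Expanding to nothing would leave a bare ";" behind the call site;
     * do { } while (0) keeps the disabled form a real statement with the
     * same syntactic behaviour as the enabled one. */
    #define dbg(fmt, ...)	do { } while (0)
    #endif

    int main(void)
    {
            int faulted = 0;

            if (faulted)
                    dbg("fault on cpu %d\n", 0);	/* behaves the same with or without DEBUG */
            else
                    printf("all CPUs ok\n");

            return 0;
    }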
@@ -60,7 +60,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 	unsigned long cycles_elapsed, ticks_elapsed;
 	unsigned long cycles_remainder;
 	unsigned int cpu = smp_processor_id();
-	struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];
+	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 
 	/* gcc can optimize for "read-only" case with a local clocktick */
 	unsigned long cpt = clocktick;
@@ -213,7 +213,7 @@ void __init start_cpu_itimer(void)
 
 	mtctl(next_tick, 16);	/* kick off Interval Timer (CR16) */
 
-	cpu_data[cpu].it_value = next_tick;
+	per_cpu(cpu_data, cpu).it_value = next_tick;
 }
 
 struct platform_device rtc_parisc_dev = {

@@ -22,14 +22,14 @@
 #include <linux/cpu.h>
 #include <linux/cache.h>
 
-static struct cpu cpu_devices[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 static int __init topology_init(void)
 {
 	int num;
 
 	for_each_present_cpu(num) {
-		register_cpu(&cpu_devices[num], num);
+		register_cpu(&per_cpu(cpu_devices, num), num);
 	}
 	return 0;
 }