MIPS: Abstract CPU core & VP(E) ID access through accessor functions
We currently have fields in struct cpuinfo_mips for the core & VP(E) ID
of a particular CPU, and various pieces of code directly access those
fields. This patch abstracts such access by introducing accessor
functions cpu_core(), cpu_set_core(), cpu_vpe_id() & cpu_set_vpe_id(),
and having code that needs these values call those functions rather
than access the struct cpuinfo_mips fields directly. This prepares us
for changes to the way in which those values are stored in later
patches.

The cpu_vpe_id() function is introduced even though we already had a
cpu_vpe_id() macro, for a few reasons:

  1) It's more consistent with the core, and future cluster, accessors.
  2) It ensures a sensible return type without explicit casts.
  3) It's generally preferable to use functions rather than macros.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/17009/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 15e6529fc3
commit f875a832d2
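For orientation before the diff: after this patch, call sites read the IDs through the accessors instead of poking cpu_data[cpu].core / .vpe_id directly. A minimal sketch of the pattern — the helper show_cpu_ids() and its pr_info() message are purely illustrative, not part of the patch; the accessors and cpu_data[] are the real ones introduced/used below:

	#include <linux/printk.h>
	#include <asm/cpu-info.h>	/* struct cpuinfo_mips, cpu_data[], accessors */

	/* Hypothetical caller, for illustration only. */
	static void show_cpu_ids(unsigned int cpu)
	{
		unsigned int core = cpu_core(&cpu_data[cpu]);
		unsigned int vpe = cpu_vpe_id(&cpu_data[cpu]);

		pr_info("CPU%u: core %u, VP(E) %u\n", cpu, core, vpe);
	}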
@@ -144,11 +144,32 @@ struct proc_cpuinfo_notifier_args {
 	unsigned long n;
 };
 
+static inline unsigned int cpu_core(struct cpuinfo_mips *cpuinfo)
+{
+	return cpuinfo->core;
+}
+
+static inline void cpu_set_core(struct cpuinfo_mips *cpuinfo,
+				unsigned int core)
+{
+	cpuinfo->core = core;
+}
+
+static inline unsigned int cpu_vpe_id(struct cpuinfo_mips *cpuinfo)
+{
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
-# define cpu_vpe_id(cpuinfo)	((cpuinfo)->vpe_id)
-#else
-# define cpu_vpe_id(cpuinfo)	({ (void)cpuinfo; 0; })
+	return cpuinfo->vpe_id;
 #endif
+	return 0;
+}
+
+static inline void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo,
+				  unsigned int vpe)
+{
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
+	cpuinfo->vpe_id = vpe;
+#endif
+}
+
 static inline unsigned long cpu_asid_inc(void)
 {
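One payoff visible here: the CONFIG_MIPS_MT_SMP / CONFIG_CPU_MIPSR6 conditional now lives inside the accessors, so callers can use cpu_vpe_id() and cpu_set_vpe_id() unconditionally (the cps_smp_setup() hunk further down drops its #if/#endif for exactly this reason). A minimal sketch, assuming a build with neither option enabled:

	struct cpuinfo_mips info = {};

	cpu_set_vpe_id(&info, 3);		/* the store compiles away on this config */
	WARN_ON(cpu_vpe_id(&info) != 0);	/* cpu_vpe_id() always returns 0 here */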
@@ -428,7 +428,7 @@ static inline unsigned int mips_cm_max_vp_width(void)
  */
 static inline unsigned int mips_cm_vp_id(unsigned int cpu)
 {
-	unsigned int core = cpu_data[cpu].core;
+	unsigned int core = cpu_core(&cpu_data[cpu]);
 	unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
 
 	return (core * mips_cm_max_vp_width()) + vp;
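mips_cm_vp_id() linearizes the (core, VP) pair into a single index. A worked example of the arithmetic, assuming mips_cm_max_vp_width() returns 4 (the width value is just an assumed example):

	/*
	 * With up to 4 VP(E)s per core:
	 *   core 0, vp 0 -> (0 * 4) + 0 = 0
	 *   core 0, vp 3 -> (0 * 4) + 3 = 3
	 *   core 2, vp 1 -> (2 * 4) + 1 = 9
	 */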
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)	(cpu_data[cpu].package)
-#define topology_core_id(cpu)			(cpu_data[cpu].core)
+#define topology_core_id(cpu)			(cpu_core(&cpu_data[cpu]))
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
 #define topology_sibling_cpumask(cpu)		(&cpu_sibling_map[cpu])
 #endif
@@ -919,9 +919,12 @@ static void decode_configs(struct cpuinfo_mips *c)
 
 #ifndef CONFIG_MIPS_CPS
 	if (cpu_has_mips_r2_r6) {
-		c->core = get_ebase_cpunum();
+		unsigned int core;
+
+		core = get_ebase_cpunum();
 		if (cpu_has_mipsmt)
-			c->core >>= fls(core_nvpes()) - 1;
+			core >>= fls(core_nvpes()) - 1;
+		cpu_set_core(c, core);
 	}
 #endif
 }
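The shift here converts EBase.CPUNum, which counts VP(E)s when MT is present, into a core number by discarding the VPE bits. A worked example with assumed values:

	/*
	 * Assume core_nvpes() == 2 (two VP(E)s per core), so there is
	 * fls(2) - 1 == 1 bit of VPE ID to shift out:
	 *   CPUNum 5 == 0b101  ->  5 >> 1 == 2, i.e. VPE 1 of core 2
	 */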
@@ -287,7 +287,7 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
 		 * CM 2.5 & older, so have to ensure other VP(E)s don't
 		 * race with us.
 		 */
-		curr_core = current_cpu_data.core;
+		curr_core = cpu_core(&current_cpu_data);
 		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
 				  per_cpu(cm_core_lock_flags, curr_core));
 
@@ -308,7 +308,7 @@ void mips_cm_unlock_other(void)
 	unsigned int curr_core;
 
 	if (mips_cm_revision() < CM_REV_CM3) {
-		curr_core = current_cpu_data.core;
+		curr_core = cpu_core(&current_cpu_data);
 		spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
 				       per_cpu(cm_core_lock_flags, curr_core));
 	} else {
@@ -86,7 +86,7 @@ void mips_cpc_lock_other(unsigned int core)
 		return;
 
 	preempt_disable();
-	curr_core = current_cpu_data.core;
+	curr_core = cpu_core(&current_cpu_data);
 	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
 			  per_cpu(cpc_core_lock_flags, curr_core));
 	write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));
@@ -106,7 +106,7 @@ void mips_cpc_unlock_other(void)
 		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
 		return;
 
-	curr_core = current_cpu_data.core;
+	curr_core = cpu_core(&current_cpu_data);
 	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
 			       per_cpu(cpc_core_lock_flags, curr_core));
 	preempt_enable();
@@ -114,7 +114,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
 int cps_pm_enter_state(enum cps_pm_state state)
 {
 	unsigned cpu = smp_processor_id();
-	unsigned core = current_cpu_data.core;
+	unsigned core = cpu_core(&current_cpu_data);
 	unsigned online, left;
 	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
 	u32 *core_ready_count, *nc_core_ready_count;
@@ -486,7 +486,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 	 * defined by the interAptiv & proAptiv SUMs as ensuring that the
 	 * operation resulting from the preceding store is complete.
 	 */
-	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
+	uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
 	uasm_i_sw(&p, t0, 0, r_pcohctl);
 	uasm_i_lw(&p, t0, 0, r_pcohctl);
 
@@ -640,7 +640,7 @@ out_err:
 static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
-	unsigned core = cpu_data[cpu].core;
+	unsigned core = cpu_core(&cpu_data[cpu]);
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -134,13 +134,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	seq_printf(m, "kscratch registers\t: %d\n",
 		   hweight8(cpu_data[n].kscratch_mask));
 	seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
-	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+	seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n]));
 
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
 	if (cpu_has_mipsmt)
-		seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+		seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
 	else if (cpu_has_vp)
-		seq_printf(m, "VP\t\t\t: %d\n", cpu_data[n].vpe_id);
+		seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
 #endif
 
 	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
@@ -245,7 +245,7 @@ static void bmips_init_secondary(void)
 		break;
 	case CPU_BMIPS5000:
 		write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
-		current_cpu_data.core = (read_c0_brcm_config() >> 25) & 3;
+		cpu_set_core(&current_cpu_data, (read_c0_brcm_config() >> 25) & 3);
 		break;
 	}
 }
@@ -76,10 +76,8 @@ static void __init cps_smp_setup(void)
 		smp_num_siblings = core_vpes;
 
 		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
-			cpu_data[nvpes + v].core = c;
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
-			cpu_data[nvpes + v].vpe_id = v;
-#endif
+			cpu_set_core(&cpu_data[nvpes + v], c);
+			cpu_set_vpe_id(&cpu_data[nvpes + v], v);
 		}
 
 		nvpes += core_vpes;
@@ -149,7 +147,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 			cpu_has_dc_aliases ? "dcache aliasing" : "");
 
 		for_each_present_cpu(c) {
-			if (cpu_data[c].core)
+			if (cpu_core(&cpu_data[c]))
 				set_cpu_present(c, false);
 		}
 	}
@@ -189,7 +187,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 	}
 
 	/* Mark this CPU as booted */
-	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
+	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
 		   1 << cpu_vpe_id(&current_cpu_data));
 
 	return;
@@ -284,7 +282,7 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
 
 static void remote_vpe_boot(void *dummy)
 {
-	unsigned core = current_cpu_data.core;
+	unsigned core = cpu_core(&current_cpu_data);
 	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
 
 	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
@@ -292,7 +290,7 @@ static void remote_vpe_boot(void *dummy)
 
 static void cps_boot_secondary(int cpu, struct task_struct *idle)
 {
-	unsigned core = cpu_data[cpu].core;
+	unsigned core = cpu_core(&cpu_data[cpu]);
 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
 	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
@@ -321,10 +319,10 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
 		mips_cm_unlock_other();
 	}
 
-	if (core != current_cpu_data.core) {
+	if (core != cpu_core(&current_cpu_data)) {
 		/* Boot a VPE on another powered up core */
 		for (remote = 0; remote < NR_CPUS; remote++) {
-			if (cpu_data[remote].core != core)
+			if (cpu_core(&cpu_data[remote]) != core)
 				continue;
 			if (cpu_online(remote))
 				break;
@@ -401,7 +399,7 @@ static int cps_cpu_disable(void)
 	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
 		return -EINVAL;
 
-	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
 	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
 	smp_mb__after_atomic();
 	set_cpu_online(cpu, false);
@@ -423,15 +421,17 @@ void play_dead(void)
 	local_irq_disable();
 	idle_task_exit();
 	cpu = smp_processor_id();
-	core = cpu_data[cpu].core;
+	core = cpu_core(&cpu_data[cpu]);
 	cpu_death = CPU_DEATH_POWER;
 
 	pr_debug("CPU%d going offline\n", cpu);
 
 	if (cpu_has_mipsmt || cpu_has_vp) {
+		core = cpu_core(&cpu_data[cpu]);
+
 		/* Look for another online VPE within the core */
 		for_each_online_cpu(cpu_death_sibling) {
-			if (cpu_data[cpu_death_sibling].core != core)
+			if (cpu_core(&cpu_data[cpu_death_sibling]) != core)
 				continue;
 
 			/*
@@ -487,7 +487,7 @@ static void wait_for_sibling_halt(void *ptr_cpu)
 
 static void cps_cpu_die(unsigned int cpu)
 {
-	unsigned core = cpu_data[cpu].core;
+	unsigned core = cpu_core(&cpu_data[cpu]);
 	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	ktime_t fail_time;
 	unsigned stat;
@@ -83,7 +83,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 	if (tc != 0)
 		smvp_copy_vpe_config();
 
-	cpu_data[ncpu].vpe_id = tc;
+	cpu_set_vpe_id(&cpu_data[ncpu], tc);
 
 	return ncpu;
 }
@@ -97,7 +97,7 @@ static inline void set_cpu_sibling_map(int cpu)
 	if (smp_num_siblings > 1) {
 		for_each_cpu(i, &cpu_sibling_setup_map) {
 			if (cpu_data[cpu].package == cpu_data[i].package &&
-			    cpu_data[cpu].core == cpu_data[i].core) {
+			    cpu_core(&cpu_data[cpu]) == cpu_core(&cpu_data[i])) {
 				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
 				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
 			}
@@ -135,7 +135,7 @@ void calculate_cpu_foreign_map(void)
 		core_present = 0;
 		for_each_cpu(k, &temp_foreign_map)
 			if (cpu_data[i].package == cpu_data[k].package &&
-			    cpu_data[i].core == cpu_data[k].core)
+			    cpu_core(&cpu_data[i]) == cpu_core(&cpu_data[k]))
 				core_present = 1;
 		if (!core_present)
 			cpumask_set_cpu(i, &temp_foreign_map);
@@ -186,9 +186,9 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 
 	if (mips_cpc_present()) {
 		for_each_cpu(cpu, mask) {
-			core = cpu_data[cpu].core;
+			core = cpu_core(&cpu_data[cpu]);
 
-			if (core == current_cpu_data.core)
+			if (core == cpu_core(&current_cpu_data))
 				continue;
 
 			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
@@ -319,8 +319,8 @@ static void loongson3_init_secondary(void)
 		loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]);
 
 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
-	cpu_data[cpu].core =
-		cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+	cpu_set_core(&cpu_data[cpu],
+		     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package);
 	cpu_data[cpu].package =
 		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
 
@@ -386,7 +386,8 @@ static void __init loongson3_smp_setup(void)
 	ipi_status0_regs_init();
 	ipi_en0_regs_init();
 	ipi_mailbox_buf_init();
-	cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
+	cpu_set_core(&cpu_data[0],
+		     cpu_logical_map(0) % loongson_sysconf.cores_per_package);
 	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
 }
 
@@ -697,7 +698,7 @@ void play_dead(void)
 
 static int loongson3_disable_clock(unsigned int cpu)
 {
-	uint64_t core_id = cpu_data[cpu].core;
+	uint64_t core_id = cpu_core(&cpu_data[cpu]);
 	uint64_t package_id = cpu_data[cpu].package;
 
 	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
@@ -711,7 +712,7 @@ static int loongson3_disable_clock(unsigned int cpu)
 
 static int loongson3_enable_clock(unsigned int cpu)
 {
-	uint64_t core_id = cpu_data[cpu].core;
+	uint64_t core_id = cpu_core(&cpu_data[cpu]);
 	uint64_t package_id = cpu_data[cpu].package;
 
 	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
|
@ -122,7 +122,7 @@ static void nlm_init_secondary(void)
|
||||||
int hwtid;
|
int hwtid;
|
||||||
|
|
||||||
hwtid = hard_smp_processor_id();
|
hwtid = hard_smp_processor_id();
|
||||||
current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
|
cpu_set_core(¤t_cpu_data, hwtid / NLM_THREADS_PER_CORE);
|
||||||
current_cpu_data.package = nlm_nodeid();
|
current_cpu_data.package = nlm_nodeid();
|
||||||
nlm_percpu_init(hwtid);
|
nlm_percpu_init(hwtid);
|
||||||
nlm_smp_irq_init(hwtid);
|
nlm_smp_irq_init(hwtid);
|
||||||
|
|
|
@ -38,9 +38,9 @@ static int perfcount_irq;
|
||||||
#ifdef CONFIG_MIPS_MT_SMP
|
#ifdef CONFIG_MIPS_MT_SMP
|
||||||
static int cpu_has_mipsmt_pertccounters;
|
static int cpu_has_mipsmt_pertccounters;
|
||||||
#define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \
|
#define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \
|
||||||
M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
|
M_PERFCTL_VPEID(cpu_vpe_id(¤t_cpu_data)))
|
||||||
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
|
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
|
||||||
0 : cpu_data[smp_processor_id()].vpe_id)
|
0 : cpu_vpe_id(¤t_cpu_data))
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The number of bits to shift to convert between counters per core and
|
* The number of bits to shift to convert between counters per core and
|
||||||
|
|
|
@@ -37,7 +37,7 @@ static int cps_nc_enter(struct cpuidle_device *dev,
 	 * TODO: don't treat core 0 specially, just prevent the final core
 	 * TODO: remap interrupt affinity temporarily
 	 */
-	if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT))
+	if (!cpu_core(&cpu_data[dev->cpu]) && (index > STATE_NC_WAIT))
 		index = STATE_NC_WAIT;
 
 	/* Select the appropriate cps_pm_state */