MIPS: Netlogic: Move cores per node out of multi-node.h

Use the current_cpu_data package field to get the node of the current
CPU. This allows us to remove xlp_cores_per_node and move
nlm_threads_per_node() and nlm_cores_per_node() to netlogic/common.h,
which simplifies code.

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8889/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

This commit is contained in:
parent 65fecc2725
commit c273652546
--- a/arch/mips/include/asm/netlogic/multi-node.h
+++ b/arch/mips/include/asm/netlogic/multi-node.h
@@ -48,15 +48,6 @@
 #endif
 
 #define NLM_THREADS_PER_CORE	4
-#ifdef CONFIG_CPU_XLR
-#define nlm_cores_per_node()	8
-#else
-extern unsigned int xlp_cores_per_node;
-#define nlm_cores_per_node()	xlp_cores_per_node
-#endif
-
-#define nlm_threads_per_node() (nlm_cores_per_node() * NLM_THREADS_PER_CORE)
-#define nlm_cpuid_to_node(c)	((c) / nlm_threads_per_node())
 
 struct nlm_soc_info {
 	unsigned long	coremask; /* cores enabled on the soc */
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -111,6 +111,25 @@ static inline int nlm_irq_to_xirq(int node, int irq)
 	return node * NR_IRQS / NLM_NR_NODES + irq;
 }
 
-extern int nlm_cpu_ready[];
+#ifdef CONFIG_CPU_XLR
+#define nlm_cores_per_node()	8
+#else
+static inline int nlm_cores_per_node(void)
+{
+	return ((read_c0_prid() & PRID_IMP_MASK)
+			== PRID_IMP_NETLOGIC_XLP9XX) ? 32 : 8;
+}
+#endif
+static inline int nlm_threads_per_node(void)
+{
+	return nlm_cores_per_node() * NLM_THREADS_PER_CORE;
+}
+
+static inline int nlm_hwtid_to_node(int hwtid)
+{
+	return hwtid / nlm_threads_per_node();
+}
+
+extern int nlm_cpu_ready[];
 #endif /* __ASSEMBLY__ */
 #endif /* _NETLOGIC_COMMON_H_ */
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -230,16 +230,16 @@ static void nlm_init_node_irqs(int node)
 	}
 }
 
-void nlm_smp_irq_init(int hwcpuid)
+void nlm_smp_irq_init(int hwtid)
 {
-	int node, cpu;
+	int cpu, node;
 
-	node = nlm_cpuid_to_node(hwcpuid);
-	cpu = hwcpuid % nlm_threads_per_node();
+	cpu = hwtid % nlm_threads_per_node();
+	node = hwtid / nlm_threads_per_node();
 
 	if (cpu == 0 && node != 0)
 		nlm_init_node_irqs(node);
-	write_c0_eimr(nlm_current_node()->irqmask);
+	write_c0_eimr(nlm_get_node(node)->irqmask);
 }
 
 asmlinkage void plat_irq_dispatch(void)
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,17 +59,17 @@
 
 void nlm_send_ipi_single(int logical_cpu, unsigned int action)
 {
-	int cpu, node;
+	unsigned int hwtid;
 	uint64_t picbase;
 
-	cpu = cpu_logical_map(logical_cpu);
-	node = nlm_cpuid_to_node(cpu);
-	picbase = nlm_get_node(node)->picbase;
+	/* node id is part of hwtid, and needed for send_ipi */
+	hwtid = cpu_logical_map(logical_cpu);
+	picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
 
 	if (action & SMP_CALL_FUNCTION)
-		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+		nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_FUNCTION, 0);
 	if (action & SMP_RESCHEDULE_YOURSELF)
-		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+		nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_RESCHEDULE, 0);
 }
 
 void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -120,7 +120,7 @@ static void nlm_init_secondary(void)
 
 	hwtid = hard_smp_processor_id();
 	current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
-	current_cpu_data.package = nlm_cpuid_to_node(hwtid);
+	current_cpu_data.package = nlm_nodeid();
 	nlm_percpu_init(hwtid);
 	nlm_smp_irq_init(hwtid);
 }
@@ -146,16 +146,18 @@ static cpumask_t phys_cpu_present_mask;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
-	int cpu, node;
+	uint64_t picbase;
+	int hwtid;
+
+	hwtid = cpu_logical_map(logical_cpu);
+	picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
 
-	cpu = cpu_logical_map(logical_cpu);
-	node = nlm_cpuid_to_node(logical_cpu);
 	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
 	nlm_next_gp = (unsigned long)task_thread_info(idle);
 
 	/* barrier for sp/gp store above */
 	__sync();
-	nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */
+	nlm_pic_send_ipi(picbase, hwtid, 1, 1);	/* NMI */
 }
 
 void __init nlm_smp_setup(void)
@@ -183,7 +185,7 @@ void __init nlm_smp_setup(void)
 		__cpu_number_map[i] = num_cpus;
 		__cpu_logical_map[num_cpus] = i;
 		set_cpu_possible(num_cpus, true);
-		node = nlm_cpuid_to_node(i);
+		node = nlm_hwtid_to_node(i);
 		cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
 		++num_cpus;
 	}
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -51,7 +51,6 @@ uint64_t nlm_io_base;
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 unsigned int nlm_threads_per_core;
-unsigned int xlp_cores_per_node;
 
 static void nlm_linux_exit(void)
 {
@@ -163,10 +162,6 @@ void __init prom_init(void)
 	void *reset_vec;
 
 	nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
-	if (cpu_is_xlp9xx())
-		xlp_cores_per_node = 32;
-	else
-		xlp_cores_per_node = 8;
 	nlm_init_boot_cpu();
 	xlp_mmu_init();
 	nlm_node_init(0);
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -111,7 +111,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
 	struct nlm_soc_info *nodep;
 	uint64_t syspcibase, fusebase;
 	uint32_t syscoremask, mask, fusemask;
-	int core, n, cpu;
+	int core, n, cpu, ncores;
 
 	for (n = 0; n < NLM_NR_NODES; n++) {
 		if (n != 0) {
@@ -168,7 +168,8 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
 		syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
 
 		pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
-		for (core = 0; core < nlm_cores_per_node(); core++) {
+		ncores = nlm_cores_per_node();
+		for (core = 0; core < ncores; core++) {
 			/* we will be on node 0 core 0 */
 			if (n == 0 && core == 0)
 				continue;
@@ -178,8 +179,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
 			continue;
 
 		/* see if at least the first hw thread is enabled */
-		cpu = (n * nlm_cores_per_node() + core)
-				* NLM_THREADS_PER_CORE;
+		cpu = (n * ncores + core) * NLM_THREADS_PER_CORE;
 		if (!cpumask_test_cpu(cpu, wakeup_mask))
 			continue;
 
Loading…
Reference in New Issue