powerpc: remove cpu_online_cores_map function
This function builds the online cores map with on-stack cpumasks, which
can cause high stack usage with large NR_CPUS. It is not used in any
performance-sensitive paths, so instead just check for the first thread
sibling.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211105035042.1398309-1-npiggin@gmail.com
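For scale: a cpumask_t is a fixed NR_CPUS-bit bitmap, so every on-stack mask grows with the configured CPU count. Below is a standalone userspace model of that cost, assuming a hypothetical NR_CPUS of 8192; the struct mirrors the kernel's bitmap layout, but the value is illustrative, not taken from any particular config.

#include <stdio.h>

#define NR_CPUS 8192	/* assumed config value, for illustration only */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Model of the kernel's cpumask_t: a fixed-size NR_CPUS-bit bitmap. */
typedef struct {
	unsigned long bits[NR_CPUS / BITS_PER_LONG];
} cpumask_t;

int main(void)
{
	/* The removed cpu_thread_mask_to_cores() kept two cpumask_t locals
	 * (tmp, res) and returned a third copy by value to the caller. */
	printf("sizeof(cpumask_t)       = %zu bytes\n", sizeof(cpumask_t));
	printf("tmp + res + return copy = %zu bytes of stack\n",
	       3 * sizeof(cpumask_t));
	return 0;
}

With NR_CPUS=8192 each mask is 1 KiB, so one call through the helper costs roughly 3 KiB of stack before the callers' own cpumask_t locals are counted.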
parent af3fdce4ab
commit b350111bf7
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -32,44 +32,11 @@ extern cpumask_t threads_core_mask;
 #define threads_core_mask	(*get_cpu_mask(0))
 #endif
 
-/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
- *                            hit by the argument
- *
- * @threads:	a cpumask of online threads
- *
- * This function returns a cpumask which will have one online cpu's
- * bit set for each core that has at least one thread set in the argument.
- *
- * This can typically be used for things like IPI for tlb invalidations
- * since those need to be done only once per core/TLB
- */
-static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
-{
-	cpumask_t	tmp, res;
-	int		i, cpu;
-
-	cpumask_clear(&res);
-	for (i = 0; i < NR_CPUS; i += threads_per_core) {
-		cpumask_shift_left(&tmp, &threads_core_mask, i);
-		if (cpumask_intersects(threads, &tmp)) {
-			cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
-			if (cpu < nr_cpu_ids)
-				cpumask_set_cpu(cpu, &res);
-		}
-	}
-	return res;
-}
-
 static inline int cpu_nr_cores(void)
 {
 	return nr_cpu_ids >> threads_shift;
 }
 
-static inline cpumask_t cpu_online_cores_map(void)
-{
-	return cpu_thread_mask_to_cores(cpu_online_mask);
-}
-
 #ifdef CONFIG_SMP
 int cpu_core_index_of_thread(int cpu);
 int cpu_first_thread_of_core(int core);
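The replacement pattern used by the callers below leans on a property cputhreads.h already guarantees: sibling threads of a core are numbered consecutively, so cpu_first_thread_sibling() maps every thread to the first thread of its core, and comparing the result against the CPU itself selects exactly one thread per core without building any mask. A minimal userspace sketch of that filter follows; THREADS_PER_CORE=8 and the 32-CPU loop are assumptions for the demo, whereas the kernel helper uses the runtime threads_per_core value.

#include <stdio.h>

#define THREADS_PER_CORE 8	/* assumed SMT8 layout; must be a power of two */

/* Model of cpu_first_thread_sibling(): clearing the low bits of a CPU
 * number yields the first (primary) thread of that CPU's core. */
static int cpu_first_thread_sibling(int cpu)
{
	return cpu & ~(THREADS_PER_CORE - 1);
}

int main(void)
{
	int cpu;

	/* Same shape as the converted loops in idle.c and opal-imc.c:
	 * visit every CPU, act only on the primary thread of each core. */
	for (cpu = 0; cpu < 32; cpu++) {
		if (cpu_first_thread_sibling(cpu) != cpu)
			continue;
		printf("once-per-core work on cpu %d\n", cpu);
	}
	return 0;
}

This prints only CPUs 0, 8, 16 and 24, which is why the converted callers can iterate all online CPUs and bail out on secondary threads instead of precomputing a per-core mask.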
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -146,9 +146,13 @@ EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
 static void pnv_fastsleep_workaround_apply(void *info)
 
 {
+	int cpu = smp_processor_id();
 	int rc;
 	int *err = info;
 
+	if (cpu_first_thread_sibling(cpu) != cpu)
+		return;
+
 	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
 					OPAL_CONFIG_IDLE_APPLY);
 	if (rc)
@@ -175,7 +179,6 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
 					struct device_attribute *attr, const char *buf,
 					size_t count)
 {
-	cpumask_t primary_thread_mask;
 	int err;
 	u8 val;
 
@@ -200,10 +203,7 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
 	power7_fastsleep_workaround_exit = false;
 
 	cpus_read_lock();
-	primary_thread_mask = cpu_online_cores_map();
-	on_each_cpu_mask(&primary_thread_mask,
-			pnv_fastsleep_workaround_apply,
-			&err, 1);
+	on_each_cpu(pnv_fastsleep_workaround_apply, &err, 1);
 	cpus_read_unlock();
 	if (err) {
 		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -200,13 +200,13 @@ static void disable_nest_pmu_counters(void)
 
 static void disable_core_pmu_counters(void)
 {
-	cpumask_t cores_map;
 	int cpu, rc;
 
 	cpus_read_lock();
 	/* Disable the IMC Core functions */
-	cores_map = cpu_online_cores_map();
-	for_each_cpu(cpu, &cores_map) {
+	for_each_online_cpu(cpu) {
+		if (cpu_first_thread_sibling(cpu) != cpu)
+			continue;
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
 					get_hard_smp_processor_id(cpu));
 		if (rc)