Merge branch 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu updates from Tejun Heo:
 "Nothing interesting.  A patch to convert the remaining __get_cpu_var()
  users, another to fix non-critical off-by-one in an assertion and a
  cosmetic conversion to lockless_dereference() in percpu-ref.

  The back-merge from mainline is to receive lockless_dereference()"

* 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: Replace smp_read_barrier_depends() with lockless_dereference()
  percpu: Convert remaining __get_cpu_var uses in 3.18-rcX
  percpu: off by one in BUG_ON()
commit eedb3d3304
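Most of the churn below is the mechanical replacement of the deprecated __get_cpu_var() accessor with the newer this_cpu operations: reading a per-cpu scalar becomes __this_cpu_read(), and taking the address of a per-cpu object becomes this_cpu_ptr(). A minimal sketch of the pattern follows; foo_count, foo_list and foo_example() are made-up names for illustration, not symbols touched by this merge.

#include <linux/percpu.h>
#include <linux/llist.h>
#include <linux/printk.h>

/* Hypothetical per-cpu variables, for illustration only. */
static DEFINE_PER_CPU(int, foo_count);
static DEFINE_PER_CPU(struct llist_head, foo_list);

/* Caller is assumed to have preemption disabled, as in the converted code. */
static void foo_example(void)
{
        /* old: int v = __get_cpu_var(foo_count); */
        int v = __this_cpu_read(foo_count);

        /* old: struct llist_head *head = &__get_cpu_var(foo_list); */
        struct llist_head *head = this_cpu_ptr(&foo_list);

        if (!llist_empty(head))
                pr_info("%d items pending on this cpu\n", v);
}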
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -511,7 +511,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 
 static int psci_suspend_finisher(unsigned long index)
 {
-        struct psci_power_state *state = __get_cpu_var(psci_power_state);
+        struct psci_power_state *state = __this_cpu_read(psci_power_state);
 
         return psci_ops.cpu_suspend(state[index - 1],
                                     virt_to_phys(cpu_resume));
@@ -520,7 +520,7 @@ static int psci_suspend_finisher(unsigned long index)
 static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
 {
         int ret;
-        struct psci_power_state *state = __get_cpu_var(psci_power_state);
+        struct psci_power_state *state = __this_cpu_read(psci_power_state);
 
         /*
          * idle state index 0 corresponds to wfi, should never be called
          * from the cpu_suspend operations
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 static inline bool __ref_is_percpu(struct percpu_ref *ref,
                                    unsigned long __percpu **percpu_countp)
 {
-        unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
-
-        /* paired with smp_store_release() in percpu_ref_reinit() */
-        smp_read_barrier_depends();
+        /* paired with smp_store_release() in percpu_ref_reinit() */
+        unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
 
         /*
          * Theoretically, the following could test just ATOMIC; however,
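The lockless_dereference() helper used above came in from mainline via the back-merge mentioned in the pull message; it packages the ACCESS_ONCE() load together with the dependency barrier that the old code issued by hand. Roughly, it is a sketch along these lines (simplified from memory, not the verbatim definition in include/linux/compiler.h):

/* Simplified sketch of the helper this hunk switches to. */
#define lockless_dereference(p) \
({ \
        typeof(p) ___p = ACCESS_ONCE(p); \
        smp_read_barrier_depends(); /* dependency ordering vs. the load of p */ \
        (___p); \
})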
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
 
 void irq_work_tick(void)
 {
-        struct llist_head *raised = &__get_cpu_var(raised_list);
+        struct llist_head *raised = this_cpu_ptr(&raised_list);
 
         if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                 irq_work_run_list(raised);
-        irq_work_run_list(&__get_cpu_var(lazy_list));
+        irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 
 /*
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -235,7 +235,7 @@ void tick_nohz_full_kick(void)
         if (!tick_nohz_full_cpu(smp_processor_id()))
                 return;
 
-        irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+        irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
 }
 
 /*
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1591,7 +1591,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                 if (cpu == NR_CPUS)
                         continue;
 
-                PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
+                PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
                 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
                 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
 
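For the mm/percpu.c hunk: valid CPU ids run from 0 to nr_cpu_ids - 1, so a unit mapped to cpu == nr_cpu_ids is already out of range. The old '>' test let exactly that value slip past the sanity check; this is the non-critical off-by-one the pull message refers to, and '>=' closes it.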