x86/mm/tlb: Unify flush_tlb_func_local() and flush_tlb_func_remote()
The unification of these two functions makes it possible to use them in the updated SMP infrastructure.

To do so, remove the reason argument from flush_tlb_func_local(), add a member to struct flush_tlb_info that says which CPU initiated the flush, and act accordingly. Optimize the size of struct flush_tlb_info while we are at it.

Unfortunately, this prevents us from using a constant flush_tlb_info for arch_tlbbatch_flush(), but in a later stage we may be able to inline flush_tlb_info into the IPI data, so it should not have an impact eventually.

Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/r/20210220231712.2475218-3-namit@vmware.com
This commit is contained in:
parent a32a4d8a81
commit 4c1ba3923e
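The core of the change: instead of separate local and remote callbacks, a single flush_tlb_func() infers on each CPU whether it is the flush initiator by comparing its own CPU id against the new initiating_cpu field. A minimal userspace sketch of that dispatch pattern follows; it is illustrative only, and the demo_* names are hypothetical stand-ins (demo_this_cpu plays the role of smp_processor_id()):

#include <stdbool.h>
#include <stdio.h>

struct demo_flush_info {
	unsigned int initiating_cpu;	/* filled in by the requester */
};

static unsigned int demo_this_cpu;	/* stand-in for smp_processor_id() */

/* Single callback, usable both locally and as an IPI handler. */
static void demo_flush_func(void *info)
{
	const struct demo_flush_info *f = info;
	bool local = (demo_this_cpu == f->initiating_cpu);

	if (!local) {
		/* remote-only bookkeeping would go here */
		printf("CPU%u: remote flush request from CPU%u\n",
		       demo_this_cpu, f->initiating_cpu);
		return;
	}
	printf("CPU%u: local flush\n", demo_this_cpu);
}

int main(void)
{
	struct demo_flush_info info = { .initiating_cpu = 0 };

	for (demo_this_cpu = 0; demo_this_cpu < 4; demo_this_cpu++)
		demo_flush_func(&info);	/* CPU 0 is local, 1-3 are remote */
	return 0;
}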
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -201,8 +201,9 @@ struct flush_tlb_info {
 	unsigned long		start;
 	unsigned long		end;
 	u64			new_tlb_gen;
-	unsigned int		stride_shift;
-	bool			freed_tables;
+	unsigned int		initiating_cpu;
+	u8			stride_shift;
+	u8			freed_tables;
 };
 
 void flush_tlb_local(void);
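A note on the size optimization above: stride_shift only ever holds small page-shift values, and freed_tables is a flag, so both fit in a u8; narrowing them makes room for the new four-byte initiating_cpu without growing the structure. A standalone illustration of the tail-layout arithmetic (field names mirror the kernel struct, but the printed sizes are what a typical LP64 ABI yields, not a guarantee):

#include <stdint.h>
#include <stdio.h>

/* Old tail: 4-byte stride_shift + 1-byte bool, padded out. */
struct old_tail {
	unsigned int stride_shift;
	_Bool freed_tables;
};

/* New tail: 4-byte initiating_cpu + two u8 fields. */
struct new_tail {
	unsigned int initiating_cpu;
	uint8_t stride_shift;
	uint8_t freed_tables;
};

int main(void)
{
	/* On a typical LP64 ABI both print 8: the new field fits for free. */
	printf("old tail: %zu bytes\n", sizeof(struct old_tail));
	printf("new tail: %zu bytes\n", sizeof(struct new_tail));
	return 0;
}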
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -439,7 +439,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
 	 */
 
-	/* We don't want flush_tlb_func_* to run concurrently with us. */
+	/* We don't want flush_tlb_func() to run concurrently with us. */
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
 		WARN_ON_ONCE(!irqs_disabled());
 
@@ -647,14 +647,13 @@ void initialize_tlbstate_and_flush(void)
 }
 
 /*
- * flush_tlb_func_common()'s memory ordering requirement is that any
+ * flush_tlb_func()'s memory ordering requirement is that any
  * TLB fills that happen after we flush the TLB are ordered after we
  * read active_mm's tlb_gen. We don't need any explicit barriers
  * because all x86 flush operations are serializing and the
  * atomic64_read operation won't be reordered by the compiler.
  */
-static void flush_tlb_func_common(const struct flush_tlb_info *f,
-				  bool local, enum tlb_flush_reason reason)
+static void flush_tlb_func(void *info)
 {
 	/*
 	 * We have three different tlb_gen values in here. They are:
@@ -665,14 +664,26 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	 * - f->new_tlb_gen: the generation that the requester of the flush
 	 *                   wants us to catch up to.
 	 */
+	const struct flush_tlb_info *f = info;
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
 	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+	bool local = smp_processor_id() == f->initiating_cpu;
+	unsigned long nr_invalidate = 0;
 
 	/* This code cannot presently handle being reentered. */
 	VM_WARN_ON(!irqs_disabled());
 
+	if (!local) {
+		inc_irq_stat(irq_tlb_count);
+		count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+
+		/* Can only happen on remote CPUs */
+		if (f->mm && f->mm != loaded_mm)
+			return;
+	}
+
 	if (unlikely(loaded_mm == &init_mm))
 		return;
 
@@ -700,8 +711,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 		 * be handled can catch us all the way up, leaving no work for
 		 * the second flush.
 		 */
-		trace_tlb_flush(reason, 0);
-		return;
+		goto done;
 	}
 
 	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
@@ -748,46 +758,34 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	    f->new_tlb_gen == local_tlb_gen + 1 &&
 	    f->new_tlb_gen == mm_tlb_gen) {
 		/* Partial flush */
-		unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
 		unsigned long addr = f->start;
 
+		nr_invalidate = (f->end - f->start) >> f->stride_shift;
+
 		while (addr < f->end) {
 			flush_tlb_one_user(addr);
 			addr += 1UL << f->stride_shift;
 		}
 		if (local)
 			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
-		trace_tlb_flush(reason, nr_invalidate);
 	} else {
 		/* Full flush. */
+		nr_invalidate = TLB_FLUSH_ALL;
+
 		flush_tlb_local();
 		if (local)
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-		trace_tlb_flush(reason, TLB_FLUSH_ALL);
 	}
 
 	/* Both paths above update our state to mm_tlb_gen. */
 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
-}
 
-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
-{
-	const struct flush_tlb_info *f = info;
-
-	flush_tlb_func_common(f, true, reason);
-}
-
-static void flush_tlb_func_remote(void *info)
-{
-	const struct flush_tlb_info *f = info;
-
-	inc_irq_stat(irq_tlb_count);
-
-	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
-		return;
-
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
+	/* Tracing is done in a unified manner to reduce the code size */
+done:
+	trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
+				(f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
+						  TLB_LOCAL_MM_SHOOTDOWN,
+			nr_invalidate);
 }
 
 static bool tlb_is_not_lazy(int cpu, void *data)
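The unified trace call above replaces the reason argument that used to be threaded through the call chain: the reason is now recomputed from context at the single done: label. A restatement of that decision table as plain C (the demo_* names are hypothetical; the constants mirror the kernel's enum tlb_flush_reason):

#include <stdio.h>

enum demo_reason {
	DEMO_REMOTE_SHOOTDOWN,		/* mirrors TLB_REMOTE_SHOOTDOWN */
	DEMO_LOCAL_SHOOTDOWN,		/* mirrors TLB_LOCAL_SHOOTDOWN */
	DEMO_LOCAL_MM_SHOOTDOWN,	/* mirrors TLB_LOCAL_MM_SHOOTDOWN */
};

/* Hypothetical helper: the same decision table as the unified trace call. */
static enum demo_reason demo_trace_reason(int local, int has_mm)
{
	if (!local)
		return DEMO_REMOTE_SHOOTDOWN;	/* IPI receiver */
	if (!has_mm)
		return DEMO_LOCAL_SHOOTDOWN;	/* f->mm == NULL: full flush */
	return DEMO_LOCAL_MM_SHOOTDOWN;		/* local, mm-targeted flush */
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_trace_reason(0, 1),	/* remote */
	       demo_trace_reason(1, 0),	/* local, no mm */
	       demo_trace_reason(1, 1));	/* local, with mm */
	return 0;
}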
@@ -816,10 +814,10 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
 	 * doing a speculative memory access.
 	 */
 	if (info->freed_tables)
-		smp_call_function_many(cpumask, flush_tlb_func_remote,
+		smp_call_function_many(cpumask, flush_tlb_func,
 			   (void *)info, 1);
 	else
-		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
+		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
 				(void *)info, 1, cpumask);
 }
@@ -869,6 +867,7 @@ static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
 	info->stride_shift = stride_shift;
 	info->freed_tables = freed_tables;
 	info->new_tlb_gen = new_tlb_gen;
+	info->initiating_cpu = smp_processor_id();
 
 	return info;
 }
@@ -908,7 +907,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
 		lockdep_assert_irqs_enabled();
 		local_irq_disable();
-		flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
+		flush_tlb_func(info);
 		local_irq_enable();
 	}
 
@@ -1119,34 +1118,26 @@ void __flush_tlb_all(void)
 }
 EXPORT_SYMBOL_GPL(__flush_tlb_all);
 
-/*
- * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
- * This means that the 'struct flush_tlb_info' that describes which mappings to
- * flush is actually fixed. We therefore set a single fixed struct and use it in
- * arch_tlbbatch_flush().
- */
-static const struct flush_tlb_info full_flush_tlb_info = {
-	.mm = NULL,
-	.start = 0,
-	.end = TLB_FLUSH_ALL,
-};
-
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
+	struct flush_tlb_info *info;
+
 	int cpu = get_cpu();
 
+	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false, 0);
 	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
 		lockdep_assert_irqs_enabled();
 		local_irq_disable();
-		flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
+		flush_tlb_func(info);
 		local_irq_enable();
 	}
 
 	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-		flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
+		flush_tlb_others(&batch->cpumask, info);
 
 	cpumask_clear(&batch->cpumask);
 
+	put_flush_tlb_info();
 	put_cpu();
 }
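The constant full_flush_tlb_info had to go because initiating_cpu now varies per caller, so arch_tlbbatch_flush() builds its info at runtime with get_flush_tlb_info() and releases it with put_flush_tlb_info(). A minimal sketch of that acquire/fill/release discipline, assuming a single scratch object that must not be used reentrantly (the kernel keeps one per CPU; all demo_* names here are hypothetical):

#include <assert.h>

struct demo_flush_info {
	unsigned int initiating_cpu;
};

/* One scratch object; the kernel keeps one of these per CPU. */
static struct demo_flush_info demo_scratch_info;
static int demo_info_in_use;

static struct demo_flush_info *demo_get_flush_info(unsigned int cpu)
{
	assert(demo_info_in_use++ == 0);	/* no nested users allowed */
	demo_scratch_info.initiating_cpu = cpu;	/* consumed by the callback */
	return &demo_scratch_info;
}

static void demo_put_flush_info(void)
{
	assert(--demo_info_in_use == 0);
}

int main(void)
{
	struct demo_flush_info *info = demo_get_flush_info(0);

	/* ... hand info to local and remote flush callbacks here ... */
	(void)info;
	demo_put_flush_info();
	return 0;
}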