ARM: 7769/1: Cortex-A15: fix erratum 798181 implementation
Looking into the active_asids array is not enough, as we also need
to look into the reserved_asids array (they both represent processes
that are currently running).

Also, not holding the ASID allocator lock is racy, as another CPU
could schedule that process and trigger a rollover, making the erratum
workaround miss an IPI.

Exposing this outside of context.c is a little ugly on the side, so
let's define a new entry point that the erratum workaround can call
to obtain the cpumask.

Cc: <stable@vger.kernel.org> # 3.9
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
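To make the first point concrete, here is a minimal userspace model of the
bug (an illustration only, not kernel code: the names mirror the kernel's
per-CPU variables, but the types and the rollover logic are heavily
simplified). During an ASID rollover the allocator parks each running
task's ASID in reserved_asids and clears the active slot, so a scan that
reads only active_asids misses a CPU that still needs the IPI:

/*
 * Simplified model of the missed-IPI case. Build with: cc -Wall demo.c
 */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 2

static uint64_t active_asids[NR_CPUS];   /* ASID currently installed   */
static uint64_t reserved_asids[NR_CPUS]; /* ASID parked across rollover */

/* Simplified rollover: park each running ASID in reserved_asids. */
static void rollover(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		reserved_asids[cpu] = active_asids[cpu];
		active_asids[cpu] = 0;
	}
}

/* Pre-fix check: only looks at active_asids. */
static int needs_ipi_broken(int cpu, uint64_t context_id)
{
	return active_asids[cpu] == context_id;
}

/* Fixed check: fall back to reserved_asids when the slot was cleared. */
static int needs_ipi_fixed(int cpu, uint64_t context_id)
{
	uint64_t asid = active_asids[cpu];

	if (asid == 0)
		asid = reserved_asids[cpu];
	return asid == context_id;
}

int main(void)
{
	uint64_t ctx = 42;

	active_asids[1] = ctx;	/* task with ASID 42 runs on CPU 1 */
	rollover();		/* another CPU triggers a rollover */

	printf("broken: IPI CPU1? %d\n", needs_ipi_broken(1, ctx)); /* 0: missed */
	printf("fixed:  IPI CPU1? %d\n", needs_ipi_fixed(1, ctx));  /* 1: sent   */
	return 0;
}

The locking point in the message is this same race one step earlier: between
reading active_asids and sending the IPI, another CPU can trigger exactly
such a rollover, which is why the new helper below takes cpu_asid_lock
around the whole scan.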
commit 0d0752bca1
parent b8e4a4740f
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -28,7 +28,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask);
+#else	/* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+					   cpumask_t *mask)
+{
+}
+#endif	/* CONFIG_ARM_ERRATA_798181 */
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
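The empty static-inline stub in the !CONFIG_ARM_ERRATA_798181 branch is the
usual kernel idiom for compiling a hook away: call sites stay unconditional
and the compiler removes the no-op. A tiny stand-alone demo of the pattern
(CONFIG_DEMO_FEATURE and feature_hook are made-up names for illustration):

#include <stdio.h>

#ifdef CONFIG_DEMO_FEATURE
static void feature_hook(int cpu)
{
	printf("hook on cpu %d\n", cpu);
}
#else
static inline void feature_hook(int cpu)
{
	(void)cpu;	/* compiles to nothing when the feature is off */
}
#endif

int main(void)
{
	feature_hook(0);	/* unconditional call site, no #ifdef here */
	return 0;
}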
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu, this_cpu;
+	int this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 
 	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are running
-		 * the same ASID as the one being invalidated. There is no
-		 * need for locking around the active_asids check since the
-		 * switch_mm() function has at least one dmb() (as required by
-		 * this workaround) in case a context switch happens on
-		 * another CPU after the condition below.
-		 */
-		if (atomic64_read(&mm->context.id) ==
-		    atomic64_read(&per_cpu(active_asids, cpu)))
-			cpumask_set_cpu(cpu, &mask);
-	}
+	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
 	put_cpu();
 }
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -45,10 +45,37 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask)
+{
+	int cpu;
+	unsigned long flags;
+	u64 context_id, asid;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	context_id = mm->context.id.counter;
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are
+		 * running the same ASID as the one being invalidated.
+		 */
+		asid = per_cpu(active_asids, cpu).counter;
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, cpu);
+		if (context_id == asid)
+			cpumask_set_cpu(cpu, mask);
+	}
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {