2019-05-27 14:55:01 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2005-11-04 07:20:27 +08:00
|
|
|
* TLB shootdown specifics for powerpc
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2005-11-04 07:20:27 +08:00
|
|
|
* Copyright (C) 2002 Anton Blanchard, IBM Corp.
|
2005-04-17 06:20:36 +08:00
|
|
|
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
|
|
|
|
*/
|
2005-11-04 07:20:27 +08:00
|
|
|
#ifndef _ASM_POWERPC_TLB_H
|
|
|
|
#define _ASM_POWERPC_TLB_H
|
2005-12-17 05:43:46 +08:00
|
|
|
#ifdef __KERNEL__
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-11-04 07:20:27 +08:00
|
|
|
#ifndef __powerpc64__
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/pgtable.h>
|
2005-11-04 07:20:27 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/pgalloc.h>
|
2005-11-04 07:20:27 +08:00
|
|
|
#ifndef __powerpc64__
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/page.h>
|
|
|
|
#include <asm/mmu.h>
|
2005-11-04 07:20:27 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-10-03 04:30:04 +08:00
|
|
|
#include <linux/pagemap.h>
|
|
|
|
|
2005-11-04 07:20:27 +08:00
|
|
|
#define tlb_start_vma(tlb, vma) do { } while (0)
|
|
|
|
#define tlb_end_vma(tlb, vma) do { } while (0)
|
2014-10-29 18:03:09 +08:00
|
|
|
#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
|
2005-11-04 07:20:27 +08:00
|
|
|
|
2018-09-04 19:18:15 +08:00
|
|
|
#define tlb_flush tlb_flush
|
2005-04-17 06:20:36 +08:00
|
|
|
extern void tlb_flush(struct mmu_gather *tlb);
|
|
|
|
|
|
|
|
/* Get the generic bits... */
|
|
|
|
#include <asm-generic/tlb.h>
|
|
|
|
|
|
|
|
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
|
|
|
|
unsigned long address);
|
|
|
|
|
|
|
|
/*
 * Architecture hook called by the generic mmu_gather code for each PTE
 * that is being torn down.  On 32-bit book3s (hash MMU) the hashed page
 * table may still hold a copy of the translation, so it has to be
 * invalidated here; every other platform needs no per-PTE work.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
#ifdef CONFIG_PPC_BOOK3S_32
	/* Nothing to do unless this PTE ever made it into the hash table. */
	if (!(pte_val(*ptep) & _PAGE_HASHPTE))
		return;

	flush_hash_entry(tlb->mm, ptep, address);
#endif
}
|
|
|
|
|
2016-07-13 17:36:39 +08:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
/*
 * Has this mm only ever run on CPUs that are hardware siblings
 * (same core) of the current CPU?  If so, TLB invalidations can be
 * kept core-local instead of broadcast.
 */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	const struct cpumask *siblings =
		topology_sibling_cpumask(smp_processor_id());

	return cpumask_subset(mm_cpumask(mm), siblings);
}
|
2016-10-24 11:20:43 +08:00
|
|
|
|
2017-07-24 12:28:02 +08:00
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
static inline int mm_is_thread_local(struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
if (atomic_read(&mm->context.active_cpus) > 1)
|
|
|
|
return false;
|
|
|
|
return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
|
|
|
|
}
|
powerpc/64s/radix: flush remote CPUs out of single-threaded mm_cpumask
When a single-threaded process has a non-local mm_cpumask, try to use
that point to flush the TLBs out of other CPUs in the cpumask.
An IPI is used for clearing remote CPUs for a few reasons:
- An IPI can end lazy TLB use of the mm, which is required to prevent
TLB entries being created on the remote CPU. The alternative is to
drop lazy TLB switching completely, which costs 7.5% in a context
switch ping-pong test between a process and kernel idle thread.
- An IPI can have remote CPUs flush the entire PID, but the local CPU
can flush a specific VA. tlbie would require over-flushing of the
local CPU (where the process is running).
- A single threaded process that is migrated to a different CPU is
likely to have a relatively small mm_cpumask, so IPI is reasonable.
No other thread can concurrently switch to this mm, because it must
have been given a reference to mm_users by the current thread before it
can use_mm. mm_users can be asynchronously incremented (by
mm_activate or mmget_not_zero), but those users must use remote mm
access and can't use_mm or access user address space. Existing code
makes this assumption already, for example sparc64 has reset
mm_cpumask using this condition since the start of history, see
arch/sparc/kernel/smp_64.c.
This reduces tlbies for a kernel compile workload from 0.90M to 0.12M,
tlbiels are increased significantly due to the PID flushing for the
cleaning up remote CPUs, and increased local flushes (PID flushes take
128 tlbiels vs 1 tlbie).
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2018-06-01 18:01:21 +08:00
|
|
|
/*
 * Shrink this mm back to being local to the current CPU: reset the
 * active-CPU count to 1 and collapse mm_cpumask to just this CPU.
 *
 * Must be called by a thread of the mm itself (enforced by the
 * WARN_ON(current->mm != mm) below), so no other thread can
 * concurrently switch to this mm while the mask is being rebuilt.
 */
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
	/*
	 * NOTE(review): context.copros presumably counts coprocessor
	 * users holding translations for this mm — resetting the mask
	 * under them would be unsafe; confirm against the copro code.
	 */
	WARN_ON(atomic_read(&mm->context.copros) > 0);
	/*
	 * It's possible for mm_access to take a reference on mm_users to
	 * access the remote mm from another thread, but it's not allowed
	 * to set mm_cpumask, so mm_users may be > 1 here.
	 */
	WARN_ON(current->mm != mm);
	/* Back to a single active CPU ... */
	atomic_set(&mm->context.active_cpus, 1);
	/* ... and a cpumask containing only the CPU we are running on. */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
|
2017-07-24 12:28:02 +08:00
|
|
|
#else /* CONFIG_PPC_BOOK3S_64 */
|
2016-10-24 11:20:43 +08:00
|
|
|
/*
 * Without an active-CPU count (non Book3S-64), fall back to comparing
 * the whole mm_cpumask against a mask holding only the current CPU.
 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	const struct cpumask *self = cpumask_of(smp_processor_id());

	return cpumask_equal(mm_cpumask(mm), self);
}
|
2017-07-24 12:28:02 +08:00
|
|
|
#endif /* !CONFIG_PPC_BOOK3S_64 */
|
2016-10-24 11:20:43 +08:00
|
|
|
|
2017-07-24 12:28:02 +08:00
|
|
|
#else /* CONFIG_SMP */
|
2016-07-13 17:36:39 +08:00
|
|
|
/* UP build: there is only one CPU, so every mm is trivially core-local. */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}
|
2016-10-24 11:20:43 +08:00
|
|
|
|
|
|
|
/* UP build: there is only one CPU, so every mm is trivially thread-local. */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
|
2016-07-13 17:36:39 +08:00
|
|
|
#endif
|
|
|
|
|
2005-12-17 05:43:46 +08:00
|
|
|
#endif /* __KERNEL__ */
|
2005-11-04 07:20:27 +08:00
|
|
|
#endif /* __ASM_POWERPC_TLB_H */
|