csky: Use generic asid algorithm to implement switch_mm
Use the Linux generic ASID/VMID allocation algorithm to implement the csky switch_mm() function. The algorithm comes from arm and works on SMP systems; it reduces the number of TLB flushes needed in switch_mm during task/VM switches. Signed-off-by: Guo Ren <ren_guo@c-sky.com> Cc: Arnd Bergmann <arnd@arndb.de>
This commit is contained in:
parent
a231b8839c
commit
22d55f02b8
|
@ -78,6 +78,12 @@ static inline void tlb_invalid_all(void)
|
|||
cpwcr("cpcr8", 0x04000000);
|
||||
}
|
||||
|
||||
|
||||
/*
 * Invalidate the entire TLB on the local CPU.
 *
 * This variant belongs to the configuration without a dedicated
 * local-invalidate instruction, so it simply falls through to the
 * generic tlb_invalid_all().
 */
static inline void local_tlb_invalid_all(void)
{
	tlb_invalid_all();
}
|
||||
|
||||
static inline void tlb_invalid_indexed(void)
|
||||
{
|
||||
cpwcr("cpcr8", 0x02000000);
|
||||
|
|
|
@ -85,6 +85,16 @@ static inline void tlb_invalid_all(void)
|
|||
#endif
|
||||
}
|
||||
|
||||
/*
 * Invalidate the entire TLB on the local CPU.
 *
 * When the core provides the TLBI instruction set
 * (CONFIG_CPU_HAS_TLBI), issue "tlbi.all" and then sync_is() so the
 * invalidation takes effect before returning; otherwise fall back to
 * the generic tlb_invalid_all().
 */
static inline void local_tlb_invalid_all(void)
{
#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.all\n":::"memory");
	sync_is();
#else
	tlb_invalid_all();
#endif
}
|
||||
|
||||
static inline void tlb_invalid_indexed(void)
|
||||
{
|
||||
mtcr("cr<8, 15>", 0x02000000);
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#define __ASM_CSKY_MMU_H
|
||||
|
||||
typedef struct {
|
||||
atomic64_t asid;
|
||||
void *vdso;
|
||||
} mm_context_t;
|
||||
|
||||
|
|
|
@ -20,20 +20,28 @@
|
|||
#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
|
||||
setup_pgd(__pa(pgd), true)
|
||||
|
||||
#define init_new_context(tsk,mm) 0
|
||||
#define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1)
|
||||
#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
|
||||
|
||||
#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
|
||||
#define activate_mm(prev,next) switch_mm(prev, next, current)
|
||||
|
||||
#define destroy_context(mm) do {} while (0)
|
||||
#define enter_lazy_tlb(mm, tsk) do {} while (0)
|
||||
#define deactivate_mm(tsk, mm) do {} while (0)
|
||||
|
||||
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
|
||||
|
||||
static inline void
|
||||
switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
if (prev != next)
|
||||
tlb_invalid_all();
|
||||
check_and_switch_context(next, cpu);
|
||||
|
||||
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
|
||||
write_mmu_entryhi(next->context.asid.counter);
|
||||
}
|
||||
#endif /* __ASM_CSKY_MMU_CONTEXT_H */
|
||||
|
|
|
@ -13,3 +13,4 @@ obj-y += ioremap.o
|
|||
obj-y += syscache.o
|
||||
obj-y += tlb.o
|
||||
obj-y += asid.o
|
||||
obj-y += context.o
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/asid.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
static DEFINE_PER_CPU(atomic64_t, active_asids);
|
||||
static DEFINE_PER_CPU(u64, reserved_asids);
|
||||
|
||||
struct asid_info asid_info;
|
||||
|
||||
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
|
||||
{
|
||||
asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
|
||||
}
|
||||
|
||||
/*
 * Allocator callback: wipe this CPU's TLB when the ASID generation
 * rolls over, so stale translations from recycled ASIDs cannot hit.
 */
static void asid_flush_cpu_ctxt(void)
{
	local_tlb_invalid_all();
}
|
||||
|
||||
static int asids_init(void)
|
||||
{
|
||||
BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus());
|
||||
|
||||
if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
|
||||
asid_flush_cpu_ctxt))
|
||||
panic("Unable to initialize ASID allocator for %lu ASIDs\n",
|
||||
NUM_ASIDS(&asid_info));
|
||||
|
||||
asid_info.active = &active_asids;
|
||||
asid_info.reserved = &reserved_asids;
|
||||
|
||||
pr_info("ASID allocator initialised with %lu entries\n",
|
||||
NUM_CTXT_ASIDS(&asid_info));
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_initcall(asids_init);
|
Loading…
Reference in New Issue