Revert "riscv: mm: notify remote harts about mmu cache updates"

This reverts the remaining bits of commit 4bd1d80efb ("riscv: mm:
notify remote harts about mmu cache updates").

According to bug reports, the suggested approach to fix stale TLB entries
is not sufficient. It needs to be replaced by a more robust solution.
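
For reference, the scheme being removed deferred TLB maintenance as
follows: a range flush immediately flushes the harts currently running
the mm (locally or via SBI) and marks every other hart's TLB as stale
for that mm; a hart with its stale bit set then performs a full per-ASID
flush the next time it switches to the mm. The standalone sketch below
models that handshake with plain bitmasks. It is an illustration only,
not kernel code; struct mm_sketch, flush_range and switch_to_mm are
names invented for the example.

  /* Simplified model of the reverted scheme: one bit per hart stands in
   * for cpumask_t. */
  #include <stdio.h>

  struct mm_sketch {
      unsigned long asid;
      unsigned long tlb_stale_mask;  /* models mm->context.tlb_stale_mask */
      unsigned long cpu_mask;        /* models mm_cpumask(mm) */
  };

  /* Stand-in for local_flush_tlb_all_asid(): sfence.vma x0, asid on hardware. */
  static void local_flush_tlb_all_asid(unsigned long asid)
  {
      printf("full flush of ASID %lu on the local hart\n", asid);
  }

  /* Range-flush path (cf. __sbi_tlb_flush_range): flush active harts now,
   * defer everyone else by marking their TLB stale for this mm. */
  static void flush_range(struct mm_sketch *mm, unsigned int this_hart)
  {
      mm->tlb_stale_mask = ~0UL;                  /* cpumask_setall()    */
      mm->tlb_stale_mask &= ~(1UL << this_hart);  /* cpumask_clear_cpu() */
      mm->tlb_stale_mask &= ~mm->cpu_mask;        /* cpumask_andnot()    */
      /* ...remote sfence.vma for the harts in cpu_mask would go here... */
  }

  /* Context-switch path (cf. set_mm_asid): catch up on a deferred flush. */
  static void switch_to_mm(struct mm_sketch *mm, unsigned int this_hart)
  {
      if (mm->tlb_stale_mask & (1UL << this_hart)) {
          mm->tlb_stale_mask &= ~(1UL << this_hart);
          local_flush_tlb_all_asid(mm->asid);
      }
  }

  int main(void)
  {
      struct mm_sketch mm = { .asid = 1, .cpu_mask = 1UL << 0 };

      flush_range(&mm, 0);   /* hart 0 flushes; idle harts are marked stale */
      switch_to_mm(&mm, 2);  /* hart 2 later picks up the mm and catches up */
      return 0;
  }

In this toy run, hart 0's flush marks harts 1-3 stale, and hart 2 catches
up with a single per-ASID flush when it next schedules the mm.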

Fixes: 4bd1d80efb ("riscv: mm: notify remote harts about mmu cache updates")
Reported-by: Zong Li <zong.li@sifive.com>
Reported-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Cc: stable@vger.kernel.org
Reviewed-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20230226150137.1919750-2-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
commit e921050022
parent 1b929c02af
Author:    Sergey Matyukevich, 2023-02-26 18:01:36 +03:00
Committer: Palmer Dabbelt
4 changed files with 17 additions and 41 deletions

arch/riscv/include/asm/mmu.h

@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
 	cpumask_t icache_stale_mask;
-	/* A local tlb flush is needed before user execution can resume. */
-	cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;

arch/riscv/include/asm/tlbflush.h

@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
-}
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all() do { } while (0)
 #define local_flush_tlb_page(addr) do { } while (0)

arch/riscv/mm/context.c

@@ -196,16 +196,6 @@ switch_mm_fast:
 	if (need_flush_tlb)
 		local_flush_tlb_all();
-#ifdef CONFIG_SMP
-	else {
-		cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-		if (cpumask_test_cpu(cpu, mask)) {
-			cpumask_clear_cpu(cpu, mask);
-			local_flush_tlb_all_asid(cntx & asid_mask);
-		}
-	}
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)

arch/riscv/mm/tlbflush.c

@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
-	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);
 
-		/*
-		 * TLB will be immediately flushed on harts concurrently
-		 * executing this MM context. TLB flush on other harts
-		 * is deferred until this MM context migrates there.
-		 */
-		cpumask_setall(pmask);
-		cpumask_clear_cpu(cpuid, pmask);
-		cpumask_andnot(pmask, pmask, cmask);
-
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 		} else if (size <= stride) {