OpenCloudOS-Kernel/arch/mips/mm/tlb-r4k.c


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>
extern void build_tlb_refill_handler(void);
/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}
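
/*
 * Note (editorial assumption): only executable mappings can leave stale
 * entries in the instruction micro TLB, which is presumably why the per-VMA
 * helper below skips the flush for VMAs without VM_EXEC.
 */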
static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
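	/*
	 * Note: tlbinvf invalidates the whole VTLB (or one FTLB set) in a
	 * single operation, but it would also wipe any wired entries, so it
	 * is only used when the wired count is zero.  The fallback loop
	 * starts at the wired count and rewrites each entry with
	 * UNIQUE_ENTRYHI(), which yields a distinct, unmapped VPN2 per index
	 * so no two entries can ever match the same address.
	 */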
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
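		/*
		 * Note: each TLB entry maps an even/odd pair of pages through
		 * EntryLo0/EntryLo1, so the range is rounded to double-page
		 * granularity and walked in (PAGE_SIZE << 1) steps.
		 */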
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, old_mmid;
			int newpid = cpu_asid(cpu, mm);
			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}
			htw_stop();
			while (start < end) {
				int idx;
				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();
		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long old_mmid;
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
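/*
 * Note: an entry with the global (G) bit set matches regardless of the
 * ASID/MMID in EntryHi, so the probe below only needs the VPN2 of the page.
 */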
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
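/*
 * Note: __update_tlb() is reached from update_mmu_cache() after the page
 * tables have been updated; it preloads (or refreshes) the matching TLB
 * entry so the faulting access does not immediately trap again.
 */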
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in for the debuggee: if the faulting
	 * mm is not the one active on this CPU, there is no TLB entry to
	 * update here.
	 */
	if (current->active_mm != vma->vm_mm)
		return;
	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
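	/*
	 * Note: a huge page is entered with PM_HUGE_MASK and a single
	 * EntryLo pair, each half covering HPAGE_SIZE / 2.  The EntryLo PFN
	 * field starts at bit 6, so (roughly) that physical offset becomes
	 * HPAGE_SIZE >> 7 in EntryLo units below.
	 */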
	if (pmd_huge(*pmdp)) {
		unsigned long lo;

		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);
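		/*
		 * Note (assumption): with 64-bit physical addresses on a
		 * 32-bit CPU the PTE is split into pte_high/pte_low; under
		 * XPA the upper physical-address bits from pte_low are
		 * written through the extended EntryLo registers
		 * (writex_c0_entrylo*).
		 */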
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);
	local_irq_restore(flags);
}
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int old_mmid;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(MMID_KERNEL_WIRED);
}
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
htw_stop();
old_pagemask = read_c0_pagemask();
wired = num_wired_entries();
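/* Claim one more wired slot and point the indexed TLB write at it. */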
write_c0_wired(wired + 1);
write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
write_c0_pagemask(pagemask);
write_c0_entryhi(entryhi);
write_c0_entrylo0(entrylo0);
write_c0_entrylo1(entrylo1);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
tlbw_use_hazard(); /* What is the hazard here? */
htw_start();
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
local_irq_restore(flags);
#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
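/*
 * Runtime probe: write the huge-page PageMask value and read it back.
 * If the value sticks, the TLB can hold huge pages. The result is
 * cached in 'mask' because the first call happens during __init.
 */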
int has_transparent_hugepage(void)
{
static unsigned int mask = -1;
if (mask == -1) { /* first call comes during __init */
unsigned long flags;
local_irq_save(flags);
write_c0_pagemask(PM_HUGE_MASK);
back_to_back_c0_hazard();
mask = read_c0_pagemask();
write_c0_pagemask(PM_DEFAULT_MASK);
local_irq_restore(flags);
}
return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* Used for loading TLB entries before trap_init() has started, when we
* don't actually want to add a wired entry which remains throughout the
* lifetime of the system
*/
int temp_tlb_entry;
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
int ret = 0;
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
unsigned long old_ctx;
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
htw_stop();
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
wired = num_wired_entries();
if (--temp_tlb_entry < wired) {
printk(KERN_WARNING
"No TLB space left for add_temporary_entry\n");
ret = -ENOSPC;
goto out;
}
write_c0_index(temp_tlb_entry);
write_c0_pagemask(pagemask);
write_c0_entryhi(entryhi);
write_c0_entrylo0(entrylo0);
write_c0_entrylo1(entrylo1);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
write_c0_pagemask(old_pagemask);
htw_start();
out:
local_irq_restore(flags);
return ret;
}
static int ntlb;
static int __init set_ntlb(char *str)
{
get_option(&str, &ntlb);
return 1;
}
__setup("ntlb=", set_ntlb);
/*
* Configure TLB (for init or after a CPU has been powered off).
*/
static void r4k_tlb_configure(void)
{
/*
* You should never change this register:
* - On R4600 1.7 the tlbp never hits for pages smaller than
* the value in the c0_pagemask register.
* - The entire mm handling assumes the c0_pagemask register to
* be set to fixed-size pages.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
back_to_back_c0_hazard();
if (read_c0_pagemask() != PM_DEFAULT_MASK)
panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
write_c0_wired(0);
if (current_cpu_type() == CPU_R10000 ||
current_cpu_type() == CPU_R12000 ||
current_cpu_type() == CPU_R14000 ||
current_cpu_type() == CPU_R16000)
write_c0_framemask(0);
if (cpu_has_rixi) {
/*
* Enable the no read, no exec bits, and enable large physical
* address.
*/
#ifdef CONFIG_64BIT
set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
}
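/*
 * Temporary entries (see add_temporary_entry()) are allocated
 * downwards from the top of the TLB.
 */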
temp_tlb_entry = current_cpu_data.tlbsize - 1;
/* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */
}
void tlb_init(void)
{
r4k_tlb_configure();
if (ntlb) {
if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
int wired = current_cpu_data.tlbsize - ntlb;
write_c0_wired(wired);
write_c0_index(wired-1);
printk("Restricting TLB to %d entries\n", ntlb);
} else
printk("Ignoring invalid argument ntlb=%d\n", ntlb);
}
build_tlb_refill_handler();
}
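/*
 * A core loses its TLB setup when it is powered down, so reconfigure
 * the TLB whenever a core leaves (or fails to enter) a low-power state.
 */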
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
switch (cmd) {
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
r4k_tlb_configure();
break;
}
return NOTIFY_OK;
}
static struct notifier_block r4k_tlb_pm_notifier_block = {
.notifier_call = r4k_tlb_pm_notifier,
};
static int __init r4k_tlb_init_pm(void)
{
return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);