/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);
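
/*
 * Note on the hazard barriers used throughout this file: on MIPS, a CP0
 * register write is not immediately visible to a following TLB
 * instruction, so mtc0_tlbw_hazard(), tlb_probe_hazard() and
 * tlbw_use_hazard() must separate each mtc0 from the tlbp/tlbwi/tlbwr
 * (or register read) that depends on it.
 */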

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON3:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}
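
/*
 * Flush the entire TLB of the local CPU. When the core supports tlbinvf
 * and no entries are wired, whole VTLB/FTLB sets are invalidated in one
 * go; otherwise each entry from the first non-wired slot upwards is
 * overwritten with a unique, unmapped EntryHi value so that no two
 * entries can ever match the same address.
 */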
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
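
/*
 * Flush the TLB entries covering [start, end) in @vma's address space on
 * the local CPU. Pages are probed in even/odd pairs (PAGE_SIZE << 1)
 * since one TLB entry maps two consecutive pages through EntryLo0 and
 * EntryLo1. When the range spans more than half the TLB (an eighth if an
 * FTLB is present), it is cheaper to drop the whole MMU context instead.
 */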
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, uninitialized_var(old_mmid);
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}
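
/*
 * Flush TLB entries covering the kernel virtual range [start, end) on
 * the local CPU; large ranges simply fall back to local_flush_tlb_all().
 */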
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}
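
/*
 * Flush the single TLB entry, if any, that maps @page in @vma's address
 * space on the local CPU, restoring EntryHi (and the MMID, where
 * supported) afterwards.
 */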
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long uninitialized_var(old_mmid);
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
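
/*
 * __update_tlb() below refills the TLB after a fault: it probes for the
 * entry covering the even/odd page pair around @address, loads
 * EntryLo0/EntryLo1 from the two consecutive PTEs (or from a single
 * huge-page PMD using PM_HUGE_MASK), and writes back to the probed
 * index, or to a random slot if nothing matched.
 */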

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and handles it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);
	local_irq_restore(flags);
}
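
/*
 * Install a wired (never evicted) TLB entry. Wired entries occupy the
 * lowest indices and survive the iterating path of
 * local_flush_tlb_all(), which starts at num_wired_entries(). Not
 * supported on XPA kernels.
 */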
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int uninitialized_var(old_mmid);
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
|
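	/*
	 * Wired entries are created under the reserved kernel MMID (0),
	 * which is never handed to an address space: GINVT invalidates by
	 * MMID (for every operation type except 0, which targets the whole
	 * TLB) and must not be able to tear down the wired entries set up
	 * here.
	 */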
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
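	/*
	 * The hardware page table walker writes translations through
	 * EntryHi; stop it while EntryHi and PageMask are clobbered below
	 * and restart it once they have been restored.
	 */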
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;
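
	/*
	 * Probe once and cache the answer: write the huge page mask to
	 * c0_pagemask and read it back; an MMU without huge page support
	 * will not latch PM_HUGE_MASK. Caching the result keeps this
	 * callable after __init without touching the hardware again.
	 */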
	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */
int temp_tlb_entry;
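
/*
 * Temporary entries are handed out from the top of the TLB downwards
 * (r4k_tlb_configure() starts temp_tlb_entry at tlbsize - 1), while wired
 * entries grow upwards from index 0; once the two meet, -ENOSPC.
 */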
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
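	/*
	 * The R10000 family has a FrameMask register which can mask bits
	 * out of the physical frame number; clear it so no physical
	 * address bits are ignored during translation.
	 */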
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	*/
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;

			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
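
/*
 * TLB contents do not survive a core being powered down, so reconfigure
 * the TLB from scratch whenever a CPU leaves a low power state (or when
 * entering one failed partway through).
 */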
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);