/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls. */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
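
	/* The TSB had no entry for this kernel instruction address.
	 * Classify it: below LOW_OBP_ADDRESS it is a module/vmalloc
	 * address served by the kernel page tables, between
	 * LOW_OBP_ADDRESS and 4GB it is an OBP (firmware) mapping.
	 */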
kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:
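	/* The two instructions at 661: below are rewritten during early
	 * boot on sun4v (hypervisor) systems with the pair that follows
	 * the .word in the .sun4v_2insn_patch section.
	 */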
|
2006-02-12 04:21:20 +08:00
|
|
|
|
|
|
|
661: stxa %g5, [%g0] ASI_ITLB_DATA_IN
|
2005-09-22 09:50:51 +08:00
|
|
|
retry
|
2006-02-12 04:21:20 +08:00
|
|
|
.section .sun4v_2insn_patch, "ax"
|
|
|
|
.word 661b
|
|
|
|
nop
|
|
|
|
nop
|
|
|
|
.previous
|
|
|
|
|
|
|
|
/* For sun4v the ASI_ITLB_DATA_IN store and the retry
|
|
|
|
* instruction get nop'd out and we get here to branch
|
|
|
|
* to the sun4v tlb load code. The registers are setup
|
|
|
|
* as follows:
|
|
|
|
*
|
|
|
|
* %g4: vaddr
|
|
|
|
* %g5: PTE
|
|
|
|
* %g6: TAG
|
|
|
|
*
|
|
|
|
* The sun4v TLB load wants the PTE in %g3 so we fix that
|
|
|
|
* up here.
|
|
|
|
*/
|
|
|
|
ba,pt %xcc, sun4v_itlb_load
|
|
|
|
mov %g5, %g3
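
	/* Slow path: switch from the MMU globals to the alternate
	 * globals (%gl = 1 on sun4v via the patch below), record the
	 * trap PC and hand off to the common fault handler with an
	 * ITLB fault code.
	 */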
kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4
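
	/* OBP (OpenBoot PROM) addresses: look the vaddr up in the
	 * firmware translations recorded at boot, insert the resulting
	 * TTE into the kernel TSB and load the TLB.
	 */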
kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop
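
	/* Early handler for linear-mapping misses, reached through
	 * kvmap_linear_patch until the kernel page tables cover the
	 * linear mapping: the TTE is formed by XORing the vaddr with
	 * kern_linear_pte_xor[0] and loaded via the 4MB TSB path.
	 */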
kvmap_linear_early:
	sethi		%hi(kern_linear_pte_xor), %g7
	ldx		[%g7 + %lo(kern_linear_pte_xor)], %g2
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
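	/* The linear (physical identity) mapping lives in the upper,
	 * sign-bit-set half of the address space; a non-negative vaddr
	 * therefore goes to the non-linear handler.
	 */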
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif

	/* Linear mapping TSB lookup failed.  Fallthrough to kernel
	 * page table based lookup.
	 */
	.globl		kvmap_linear_patch
kvmap_linear_patch:
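	/* This branch routes linear-mapping misses to kvmap_linear_early.
	 * Once the kernel page tables fully describe the linear mapping
	 * it is patched to a nop (kernel_physical_mapping_init() in
	 * arch/sparc/mm/init_64.c), so the miss falls through to the
	 * page table walk below.
	 */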
	ba,a,pt		%xcc, kvmap_linear_early

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
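	/* %g5 holds VMEMMAP_BASE on entry.  Index vmemmap_table, an
	 * array of 4MB TTEs: entry = (vaddr - VMEMMAP_BASE) >> ILOG2_4MB,
	 * eight bytes each, and load the TTE straight into the DTLB.
	 */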
	sub		%g4, %g5, %g5
	srlx		%g5, ILOG2_4MB, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs. */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap. */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
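
	/* The base page size TSB missed too, so the address must be a
	 * module or vmalloc mapping, i.e. inside
	 * [MODULES_VADDR, VMALLOC_END); anything outside that range is
	 * a real fault.
	 */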
kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop
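
	/* Same classification as the ITLB path: below LOW_OBP_ADDRESS
	 * the kernel page tables are walked, between LOW_OBP_ADDRESS
	 * and 4GB the OBP translations are used, and above 4GB (but
	 * below VMALLOC_END) we fall back to the page table walk.
	 */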
kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop
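
	/* Slow path, mirroring kvmap_itlb_longpath: switch global
	 * register sets, recover the faulting address (TLB_TAG_ACCESS
	 * on sun4u, the hypervisor fault status area on sun4v), then
	 * dispatch to the common fault handler, or to the window fixup
	 * trampoline if the miss nested inside another trap (TL > 1).
	 */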
kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop