powerpc/book3s/32: Use patch_site to patch hash functions

Use patch_site and the new modify_instruction_site() function
instead of hard-coding the patching of the hash functions.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
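Background on the mechanism, for readers who don't have the code-patching helpers in mind: a patch_site is an s32 that records a self-relative offset to a tagged instruction, and modify_instruction_site() clears a bit field in that instruction and ORs in a new value. The stand-alone C sketch below illustrates only the assumed semantics; the *_sketch names are made up for this note, and the real kernel helpers live in asm/code-patching.h and go through patch_instruction() so the instruction cache is kept coherent.

#include <stdint.h>

/* Resolve a patch site: the s32 stores (target - &site), so adding its
 * value to the site's own address yields the tagged instruction. */
static uint32_t *patch_site_addr_sketch(int32_t *site)
{
	return (uint32_t *)((char *)site + *site);
}

/* Clear the bits in 'clr' and OR in 'set' -- e.g. rewrite only the
 * 16-bit immediate of an addis, leaving opcode and registers alone. */
static void modify_instruction_site_sketch(int32_t *site, uint32_t clr, uint32_t set)
{
	uint32_t *insn = patch_site_addr_sketch(site);

	*insn = (*insn & ~clr) | set;	/* the kernel does this via patch_instruction() */
}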
Christophe Leroy, 2018-11-09 17:33:24 +00:00, committed by Michael Ellerman
commit 9efc74ff52
parent 4a3a224c5a
3 changed files with 35 additions and 39 deletions


@@ -92,6 +92,12 @@ typedef struct {
 	unsigned long vdso_base;
 } mm_context_t;
 
+/* patch sites */
+extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
+extern s32 patch__hash_page_B, patch__hash_page_C;
+extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
+extern s32 patch__flush_hash_B;
+
 #endif /* !__ASSEMBLY__ */
 
 /* We happily ignore the smaller BATs on 601, we don't actually use


@@ -28,6 +28,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/export.h>
 #include <asm/feature-fixups.h>
+#include <asm/code-patching-asm.h>
 
 #ifdef CONFIG_SMP
 	.section .bss
@@ -337,11 +338,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
 	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
 	SET_V(r5)			/* set V (valid) bit */
 
+	patch_site	0f, patch__hash_page_A0
+	patch_site	1f, patch__hash_page_A1
+	patch_site	2f, patch__hash_page_A2
 	/* Get the address of the primary PTE group in the hash table (r3) */
-_GLOBAL(hash_page_patch_A)
-	addis	r0,r7,Hash_base@h	/* base address of hash table */
-	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
-	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+0:	addis	r0,r7,Hash_base@h	/* base address of hash table */
+1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
+2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
 	xor	r3,r3,r0		/* make primary hash */
 	li	r0,8			/* PTEs/group */
@@ -366,10 +369,10 @@ _GLOBAL(hash_page_patch_A)
 	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
 	beq+	found_slot
 
+	patch_site	0f, patch__hash_page_B
 	/* Search the secondary PTEG for a matching PTE */
 	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
-_GLOBAL(hash_page_patch_B)
-	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
+0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
 	xori	r4,r4,(-PTEG_SIZE & 0xffff)
 	addi	r4,r4,-HPTE_SIZE
 	mtctr	r0
@@ -393,10 +396,10 @@ _GLOBAL(hash_page_patch_B)
 	addi	r6,r6,1
 	stw	r6,primary_pteg_full@l(r4)
 
+	patch_site	0f, patch__hash_page_C
 	/* Search the secondary PTEG for an empty slot */
 	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
-_GLOBAL(hash_page_patch_C)
-	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
+0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
 	xori	r4,r4,(-PTEG_SIZE & 0xffff)
 	addi	r4,r4,-HPTE_SIZE
 	mtctr	r0
@@ -577,11 +580,13 @@ _GLOBAL(flush_hash_pages)
 	stwcx.	r8,0,r5			/* update the pte */
 	bne-	33b
 
+	patch_site	0f, patch__flush_hash_A0
+	patch_site	1f, patch__flush_hash_A1
+	patch_site	2f, patch__flush_hash_A2
 	/* Get the address of the primary PTE group in the hash table (r3) */
-_GLOBAL(flush_hash_patch_A)
-	addis	r8,r7,Hash_base@h	/* base address of hash table */
-	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
-	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+0:	addis	r8,r7,Hash_base@h	/* base address of hash table */
+1:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
+2:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
 	xor	r8,r0,r8		/* make primary hash */
 
 	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
@@ -593,11 +598,11 @@ _GLOBAL(flush_hash_patch_A)
 	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
 	beq+	3f
 
+	patch_site	0f, patch__flush_hash_B
 	/* Search the secondary PTEG for a matching PTE */
 	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
 	li	r0,8			/* PTEs/group */
-_GLOBAL(flush_hash_patch_B)
-	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
+0:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
 	xori	r12,r12,(-PTEG_SIZE & 0xffff)
 	addi	r12,r12,-HPTE_SIZE
 	mtctr	r0


@@ -31,6 +31,7 @@
 #include <asm/prom.h>
 #include <asm/mmu.h>
 #include <asm/machdep.h>
+#include <asm/code-patching.h>
 
 #include "mmu_decl.h"
@@ -182,10 +183,6 @@ void __init MMU_init_hw(void)
 	unsigned int hmask, mb, mb2;
 	unsigned int n_hpteg, lg_n_hpteg;
 
-	extern unsigned int hash_page_patch_A[];
-	extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
-	extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
-
 	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return;
@@ -234,31 +231,19 @@ void __init MMU_init_hw(void)
 	if (lg_n_hpteg > 16)
 		mb2 = 16 - LG_HPTEG_SIZE;
 
-	hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
-		| ((unsigned int)(Hash) >> 16);
-	hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
-	hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
-	hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
-	hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
-
-	/*
-	 * Ensure that the locations we've patched have been written
-	 * out from the data cache and invalidated in the instruction
-	 * cache, on those machines with split caches.
-	 */
-	flush_icache_range((unsigned long) &hash_page_patch_A[0],
-			   (unsigned long) &hash_page_patch_C[1]);
+	modify_instruction_site(&patch__hash_page_A0, 0xffff, (unsigned int)Hash >> 16);
+	modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6);
+	modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6);
+	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
+	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);
 
 	/*
 	 * Patch up the instructions in hashtable.S:flush_hash_page
 	 */
-	flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
-		| ((unsigned int)(Hash) >> 16);
-	flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
-	flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
-	flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
-	flush_icache_range((unsigned long) &flush_hash_patch_A[0],
-			   (unsigned long) &flush_hash_patch_B[1]);
 
 	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
 }
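A note on the masks used in the hunk above, as a reading of the PowerPC encodings rather than anything stated in the commit: 0xffff covers the 16-bit immediate field of addis/xoris, so the patch drops in the upper half of the hash table base or the hash mask, while 0x7c0 covers bits 6..10, the MB field of rlwimi/rlwinm, so mb << 6 adjusts how many hash bits are kept for the chosen table size. A small stand-alone C example of the 0xffff case, using a hypothetical base address in place of the kernel's Hash pointer:

#include <stdio.h>

int main(void)
{
	/* addis r0,r7,0 -- opcode 15, RT=r0, RA=r7, immediate left at 0 */
	unsigned int insn = 0x3c070000;
	/* hypothetical hash table base, stands in for the kernel's Hash */
	unsigned int hash_base = 0x00f00000;

	/* same mask/value step as modify_instruction_site(..., 0xffff, Hash >> 16) */
	insn = (insn & ~0xffffu) | (hash_base >> 16);

	printf("patched addis: 0x%08x\n", insn);	/* prints 0x3c0700f0 */
	return 0;
}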