KVM: PPC: BOOK3S: HV: Update compute_tlbie_rb to handle 16MB base page
When calculating the lower bits of the AVA field, use the shift count based on the base page size. Also add the missing segment size and remove a stale comment.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
This commit is contained in:
parent
7a58777a33
commit
63fff5c1e3
|
@ -147,6 +147,8 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
|
|||
*/
|
||||
/* This covers 14..54 bits of va*/
|
||||
rb = (v & ~0x7fUL) << 16; /* AVA field */
|
||||
|
||||
rb |= v >> (62 - 8); /* B field */
|
||||
/*
|
||||
* AVA in v had cleared lower 23 bits. We need to derive
|
||||
* that from pteg index
|
||||
|
@ -177,10 +179,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
|
|||
{
|
||||
int aval_shift;
|
||||
/*
|
||||
* remaining 7bits of AVA/LP fields
|
||||
* remaining bits of AVA/LP fields
|
||||
* Also contain the rr bits of LP
|
||||
*/
|
||||
rb |= (va_low & 0x7f) << 16;
|
||||
rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
|
||||
/*
|
||||
* Now clear not needed LP bits based on actual psize
|
||||
*/
|
||||
|
|
|
@ -2064,12 +2064,6 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
|
|||
(*sps)->page_shift = def->shift;
|
||||
(*sps)->slb_enc = def->sllp;
|
||||
(*sps)->enc[0].page_shift = def->shift;
|
||||
/*
|
||||
* Only return base page encoding. We don't want to return
|
||||
* all the supporting pte_enc, because our H_ENTER doesn't
|
||||
* support MPSS yet. Once they do, we can start passing all
|
||||
* support pte_enc here
|
||||
*/
|
||||
(*sps)->enc[0].pte_enc = def->penc[linux_psize];
|
||||
/*
|
||||
* Add 16MB MPSS support if host supports it
|
||||
|
|
Loading…
Reference in New Issue