powerpc/64s: Remove MSR[ISF] bit

No supported processor implements this mode. Setting the bit in
MSR values can be a bit confusing (and would prevent the bit from
ever being reused). Remove it.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201106045340.1935841-1-npiggin@gmail.com
This commit is contained in:
Nicholas Piggin 2020-11-06 14:53:40 +10:00 committed by Michael Ellerman
parent c33cd1ed60
commit e89a8ca94b
4 changed files with 4 additions and 8 deletions

View File

@@ -29,7 +29,6 @@
 #include <asm/reg_8xx.h>
 #define MSR_SF_LG	63	/* Enable 64 bit mode */
-#define MSR_ISF_LG	61	/* Interrupt 64b mode valid on 630 */
 #define MSR_HV_LG	60	/* Hypervisor state */
 #define MSR_TS_T_LG	34	/* Trans Mem state: Transactional */
 #define MSR_TS_S_LG	33	/* Trans Mem state: Suspended */
@@ -69,13 +68,11 @@
 #ifdef CONFIG_PPC64
 #define MSR_SF		__MASK(MSR_SF_LG)	/* Enable 64 bit mode */
-#define MSR_ISF		__MASK(MSR_ISF_LG)	/* Interrupt 64b mode valid on 630 */
 #define MSR_HV		__MASK(MSR_HV_LG)	/* Hypervisor state */
 #define MSR_S		__MASK(MSR_S_LG)	/* Secure state */
 #else
 /* so tests for these bits fail on 32-bit */
 #define MSR_SF		0
-#define MSR_ISF		0
 #define MSR_HV		0
 #define MSR_S		0
 #endif
@@ -134,7 +131,7 @@
 #define MSR_64BIT	MSR_SF
 /* Server variant */
-#define __MSR		(MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF | MSR_HV)
+#define __MSR		(MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_HV)
 #ifdef __BIG_ENDIAN__
 #define MSR_		__MSR
 #define MSR_IDLE	(MSR_ME | MSR_SF | MSR_HV)

View File

@@ -969,7 +969,7 @@ _GLOBAL(enter_prom)
 	mtsrr1	r11
 	rfi
 #else /* CONFIG_PPC_BOOK3E */
-	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
 	andc	r11,r11,r12
 	mtsrr1	r11
 	RFI_TO_KERNEL

View File

@@ -870,8 +870,7 @@ enable_64b_mode:
 	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
 	mtmsr	r11
 #else /* CONFIG_PPC_BOOK3E */
-	li	r12,(MSR_64BIT | MSR_ISF)@highest
-	sldi	r12,r12,48
+	LOAD_REG_IMMEDIATE(r12, MSR_64BIT)
 	or	r11,r11,r12
 	mtmsrd	r11
 	isync

View File

@@ -239,7 +239,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
 	/* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-	smsr |= MSR_ISF | MSR_HV;
+	smsr |= MSR_HV;
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	/*