more s390 updates for 5.19 merge window
Merge tag 's390-5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:
 "Just a couple of small improvements, bug fixes and cleanups:

   - Add Eric Farman as maintainer for s390 virtio drivers.

   - Improve machine check handling, and avoid incorrectly injecting a
     machine check into a KVM guest.

   - Add a cond_resched() call to the gmap page table walker in order to
     avoid possible huge latencies. Also use the non-quiescing sske
     instruction to speed up storage key handling.

   - Add __GFP_NORETRY to KEXEC_CONTROL_MEMORY_GFP so s390 behaves
     similarly to common code.

   - Get the sie control block address from the correct stack slot in the
     perf event code. This fixes potential random memory accesses.

   - Change the uaccess code so that the exception handler sets the result
     of get_user() and __get_kernel_nofault() to zero in case of a fault.
     Until now this was done via input parameters for inline assemblies.
     Doing it via fault handling is what most, if not all, other
     architectures do.

   - A couple of other small cleanups and fixes"

* tag 's390-5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/stack: add union to reflect kvm stack slot usages
  s390/stack: merge empty stack frame slots
  s390/uaccess: whitespace cleanup
  s390/uaccess: use __noreturn instead of __attribute__((noreturn))
  s390/uaccess: use exception handler to zero result on get_user() failure
  s390/uaccess: use symbolic names for inline assembler operands
  s390/mcck: isolate SIE instruction when setting CIF_MCCK_GUEST flag
  s390/mm: use non-quiescing sske for KVM switch to keyed guest
  s390/gmap: voluntarily schedule during key setting
  MAINTAINERS: Update s390 virtio-ccw
  s390/kexec: add __GFP_NORETRY to KEXEC_CONTROL_MEMORY_GFP
  s390/Kconfig.debug: fix indentation
  s390/Kconfig: fix indentation
  s390/perf: obtain sie_block from the right address
  s390: generate register offsets into pt_regs automatically
  s390: simplify early program check handler
  s390/crypto: fix scatterwalk_unmap() callers in AES-GCM
commit 4ab6cfc4ad
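
The uaccess rework summarized in the pull message changes how a faulting get_user() or __get_kernel_nofault() reports failure: the exception handler itself writes -EFAULT into the error register and zeroes the destination, instead of the inline assembly preloading those values. The following is a minimal userspace sketch of that calling contract only; fake_get_user() and its NULL check are illustrative stand-ins for a real user access and page fault, not kernel code.

#include <errno.h>
#include <stdio.h>

/* Toy model of the contract: return 0 on success, or -EFAULT with the
 * destination zeroed when the access "faults" (simulated by NULL here). */
static int fake_get_user(unsigned long *dst, const unsigned long *uptr)
{
	if (!uptr) {
		*dst = 0;	/* what the fixup handler now guarantees */
		return -EFAULT;
	}
	*dst = *uptr;
	return 0;
}

int main(void)
{
	unsigned long src = 42, val = 0xdeadbeef;
	int rc;

	rc = fake_get_user(&val, &src);
	printf("ok:    rc=%d val=%lu\n", rc, val);	/* rc=0   val=42 */

	val = 0xdeadbeef;
	rc = fake_get_user(&val, NULL);
	printf("fault: rc=%d val=%lu\n", rc, val);	/* rc=-14 val=0 */
	return 0;
}
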
@@ -21057,6 +21057,7 @@ F: include/uapi/linux/virtio_crypto.h
 VIRTIO DRIVERS FOR S390
 M: Cornelia Huck <cohuck@redhat.com>
 M: Halil Pasic <pasic@linux.ibm.com>
+M: Eric Farman <farman@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 L: virtualization@lists.linux-foundation.org
 L: kvm@vger.kernel.org

@@ -732,11 +732,11 @@ config VFIO_AP
 depends on S390_AP_IOMMU && VFIO_MDEV && KVM
 depends on ZCRYPT
 help
-This driver grants access to Adjunct Processor (AP) devices
-via the VFIO mediated device interface.
+This driver grants access to Adjunct Processor (AP) devices
+via the VFIO mediated device interface.

-To compile this driver as a module, choose M here: the module
-will be called vfio_ap.
+To compile this driver as a module, choose M here: the module
+will be called vfio_ap.

 endmenu

@@ -14,9 +14,9 @@ config DEBUG_ENTRY
 If unsure, say N.

 config CIO_INJECT
-bool "CIO Inject interfaces"
-depends on DEBUG_KERNEL && DEBUG_FS
-help
-This option provides a debugging facility to inject certain artificial events
-and instruction responses to the CIO layer of Linux kernel. The newly created
-debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/*
+bool "CIO Inject interfaces"
+depends on DEBUG_KERNEL && DEBUG_FS
+help
+This option provides a debugging facility to inject certain artificial events
+and instruction responses to the CIO layer of Linux kernel. The newly created
+debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/*

@@ -701,7 +701,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
 unsigned int nbytes)
 {
 gw->walk_bytes_remain -= nbytes;
-scatterwalk_unmap(&gw->walk);
+scatterwalk_unmap(gw->walk_ptr);
 scatterwalk_advance(&gw->walk, nbytes);
 scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
 gw->walk_ptr = NULL;

@@ -776,7 +776,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 goto out;
 }

-scatterwalk_unmap(&gw->walk);
+scatterwalk_unmap(gw->walk_ptr);
 gw->walk_ptr = NULL;

 gw->ptr = gw->buf;

@@ -3,12 +3,24 @@
 #define __ASM_EXTABLE_H

 #include <linux/stringify.h>
+#include <linux/bits.h>
 #include <asm/asm-const.h>

-#define EX_TYPE_NONE 0
-#define EX_TYPE_FIXUP 1
-#define EX_TYPE_BPF 2
-#define EX_TYPE_UACCESS 3
+#define EX_TYPE_NONE 0
+#define EX_TYPE_FIXUP 1
+#define EX_TYPE_BPF 2
+#define EX_TYPE_UA_STORE 3
+#define EX_TYPE_UA_LOAD_MEM 4
+#define EX_TYPE_UA_LOAD_REG 5
+
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(3, 0)
+
+#define EX_DATA_REG_ADDR_SHIFT 4
+#define EX_DATA_REG_ADDR GENMASK(7, 4)
+
+#define EX_DATA_LEN_SHIFT 8
+#define EX_DATA_LEN GENMASK(11, 8)

 #define __EX_TABLE(_section, _fault, _target, _type) \
 stringify_in_c(.section _section,"a";) \

@@ -19,35 +31,58 @@
 stringify_in_c(.short 0;) \
 stringify_in_c(.previous)

-#define __EX_TABLE_UA(_section, _fault, _target, _type, _reg) \
-stringify_in_c(.section _section,"a";) \
-stringify_in_c(.align 4;) \
-stringify_in_c(.long (_fault) - .;) \
-stringify_in_c(.long (_target) - .;) \
-stringify_in_c(.short (_type);) \
-stringify_in_c(.macro extable_reg reg;) \
-stringify_in_c(.set .Lfound, 0;) \
-stringify_in_c(.set .Lregnr, 0;) \
-stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \
-stringify_in_c(.ifc "\reg", "%%\rs";) \
-stringify_in_c(.set .Lfound, 1;) \
-stringify_in_c(.short .Lregnr;) \
-stringify_in_c(.endif;) \
-stringify_in_c(.set .Lregnr, .Lregnr+1;) \
-stringify_in_c(.endr;) \
-stringify_in_c(.ifne (.Lfound != 1);) \
-stringify_in_c(.error "extable_reg: bad register argument";) \
-stringify_in_c(.endif;) \
-stringify_in_c(.endm;) \
-stringify_in_c(extable_reg _reg;) \
-stringify_in_c(.purgem extable_reg;) \
+#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
+stringify_in_c(.section _section,"a";) \
+stringify_in_c(.align 4;) \
+stringify_in_c(.long (_fault) - .;) \
+stringify_in_c(.long (_target) - .;) \
+stringify_in_c(.short (_type);) \
+stringify_in_c(.macro extable_reg regerr, regaddr;) \
+stringify_in_c(.set .Lfound, 0;) \
+stringify_in_c(.set .Lcurr, 0;) \
+stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \
+stringify_in_c( .ifc "\regerr", "%%r\rs";) \
+stringify_in_c( .set .Lfound, 1;) \
+stringify_in_c( .set .Lregerr, .Lcurr;) \
+stringify_in_c( .endif;) \
+stringify_in_c( .set .Lcurr, .Lcurr+1;) \
+stringify_in_c(.endr;) \
+stringify_in_c(.ifne (.Lfound != 1);) \
+stringify_in_c( .error "extable_reg: bad register argument1";) \
+stringify_in_c(.endif;) \
+stringify_in_c(.set .Lfound, 0;) \
+stringify_in_c(.set .Lcurr, 0;) \
+stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \
+stringify_in_c( .ifc "\regaddr", "%%r\rs";) \
+stringify_in_c( .set .Lfound, 1;) \
+stringify_in_c( .set .Lregaddr, .Lcurr;) \
+stringify_in_c( .endif;) \
+stringify_in_c( .set .Lcurr, .Lcurr+1;) \
+stringify_in_c(.endr;) \
+stringify_in_c(.ifne (.Lfound != 1);) \
+stringify_in_c( .error "extable_reg: bad register argument2";) \
+stringify_in_c(.endif;) \
+stringify_in_c(.short .Lregerr << EX_DATA_REG_ERR_SHIFT | \
+.Lregaddr << EX_DATA_REG_ADDR_SHIFT | \
+_len << EX_DATA_LEN_SHIFT;) \
+stringify_in_c(.endm;) \
+stringify_in_c(extable_reg _regerr,_regaddr;) \
+stringify_in_c(.purgem extable_reg;) \
 stringify_in_c(.previous)

 #define EX_TABLE(_fault, _target) \
 __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP)

 #define EX_TABLE_AMODE31(_fault, _target) \
 __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP)
-#define EX_TABLE_UA(_fault, _target, _reg) \
-__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UACCESS, _reg)
+
+#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
+__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
+
+#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
+__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+
+#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
+__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
+
 #endif /* __ASM_EXTABLE_H */

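The reworked __EX_TABLE_UA above packs three values into the 16-bit data word of each extended exception table entry: the error register in bits 0-3, the address/target register in bits 4-7, and an access length in bits 8-11. Below is a small standalone C sketch of just that encoding, assuming only the field layout defined in this header; ex_data_pack() is a made-up helper, while the kernel derives the register numbers at assembly time via the extable_reg macro and decodes them with GENMASK()/FIELD_GET().

#include <stdio.h>

#define EX_DATA_REG_ERR_SHIFT	0
#define EX_DATA_REG_ERR		(0xfu << EX_DATA_REG_ERR_SHIFT)		/* GENMASK(3, 0) */
#define EX_DATA_REG_ADDR_SHIFT	4
#define EX_DATA_REG_ADDR	(0xfu << EX_DATA_REG_ADDR_SHIFT)	/* GENMASK(7, 4) */
#define EX_DATA_LEN_SHIFT	8
#define EX_DATA_LEN		(0xfu << EX_DATA_LEN_SHIFT)		/* GENMASK(11, 8) */

/* Pack error register, address register and length like __EX_TABLE_UA does. */
static unsigned int ex_data_pack(unsigned int regerr, unsigned int regaddr,
				 unsigned int len)
{
	return (regerr << EX_DATA_REG_ERR_SHIFT) |
	       (regaddr << EX_DATA_REG_ADDR_SHIFT) |
	       (len << EX_DATA_LEN_SHIFT);
}

int main(void)
{
	/* e.g. error code in %r4, faulting destination in %r2, 8-byte access */
	unsigned int data = ex_data_pack(4, 2, 8);

	printf("data=%#x err=r%u addr=r%u len=%u\n", data,
	       (data & EX_DATA_REG_ERR) >> EX_DATA_REG_ERR_SHIFT,
	       (data & EX_DATA_REG_ADDR) >> EX_DATA_REG_ADDR_SHIFT,
	       (data & EX_DATA_LEN) >> EX_DATA_LEN_SHIFT);
	return 0;
}
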
@@ -31,7 +31,7 @@
 #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)

 /* Allocate control page with GFP_DMA */
-#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
+#define KEXEC_CONTROL_MEMORY_GFP (GFP_DMA | __GFP_NORETRY)

 /* Maximum address we can use for the crash control pages */
 #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)

@@ -304,12 +304,6 @@ static __always_inline void __noreturn disabled_wait(void)
 while (1);
 }

-/*
- * Basic Program Check Handler.
- */
-extern void s390_base_pgm_handler(void);
-extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
-
 #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL

 extern int memcpy_real(void *, unsigned long, size_t);

@@ -39,8 +39,15 @@ static inline bool on_stack(struct stack_info *info,
 * Kernel uses the packed stack layout (-mpacked-stack).
 */
 struct stack_frame {
-unsigned long empty1[5];
-unsigned int empty2[8];
+union {
+unsigned long empty[9];
+struct {
+unsigned long sie_control_block;
+unsigned long sie_savearea;
+unsigned long sie_reason;
+unsigned long sie_flags;
+};
+};
 unsigned long gprs[10];
 unsigned long back_chain;
 };

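Because the union above only gives names to slots that the SIE code was already using, the new members alias the front of the former empty area and the frame layout does not change. A standalone sketch that prints the resulting offsets, assuming a 64-bit unsigned long as on s390; the kernel exports the corresponding values as __SF_SIE_CONTROL and friends from asm-offsets.c (see the hunk further down).

#include <stdio.h>
#include <stddef.h>

/* Copy of the struct from the hunk above, for illustration only. */
struct stack_frame {
	union {
		unsigned long empty[9];
		struct {
			unsigned long sie_control_block;
			unsigned long sie_savearea;
			unsigned long sie_reason;
			unsigned long sie_flags;
		};
	};
	unsigned long gprs[10];
	unsigned long back_chain;
};

int main(void)
{
	/* sie_control_block shares its offset with empty[0], and so on */
	printf("empty[0]:          %zu\n", offsetof(struct stack_frame, empty[0]));
	printf("sie_control_block: %zu\n", offsetof(struct stack_frame, sie_control_block));
	printf("sie_savearea:      %zu\n", offsetof(struct stack_frame, sie_savearea));
	printf("gprs:              %zu\n", offsetof(struct stack_frame, gprs));
	printf("back_chain:        %zu\n", offsetof(struct stack_frame, back_chain));
	return 0;
}
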
@@ -3,7 +3,7 @@
 * S390 version
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com),
-* Martin Schwidefsky (schwidefsky@de.ibm.com)
+* Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/uaccess.h"
 */

@@ -55,9 +55,6 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
 return n;
 }

-int __put_user_bad(void) __attribute__((noreturn));
-int __get_user_bad(void) __attribute__((noreturn));
-
 union oac {
 unsigned int val;
 struct {

@@ -80,8 +77,14 @@ union oac {
 };
 };

-#define __put_get_user_asm(to, from, size, oac_spec) \
+int __noreturn __put_user_bad(void);
+
+#define __put_user_asm(to, from, size) \
 ({ \
+union oac __oac_spec = { \
+.oac1.as = PSW_BITS_AS_SECONDARY, \
+.oac1.a = 1, \
+}; \
 int __rc; \
 \
 asm volatile( \

@@ -89,26 +92,15 @@
 "0: mvcos %[_to],%[_from],%[_size]\n" \
 "1: xr %[rc],%[rc]\n" \
 "2:\n" \
-EX_TABLE_UA(0b,2b,%[rc]) EX_TABLE_UA(1b,2b,%[rc]) \
+EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
+EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
 : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
 : [_size] "d" (size), [_from] "Q" (*(from)), \
-[spec] "d" (oac_spec.val) \
+[spec] "d" (__oac_spec.val) \
 : "cc", "0"); \
 __rc; \
 })

-#define __put_user_asm(to, from, size) \
-__put_get_user_asm(to, from, size, ((union oac) { \
-.oac1.as = PSW_BITS_AS_SECONDARY, \
-.oac1.a = 1 \
-}))
-
-#define __get_user_asm(to, from, size) \
-__put_get_user_asm(to, from, size, ((union oac) { \
-.oac2.as = PSW_BITS_AS_SECONDARY, \
-.oac2.a = 1 \
-})) \
-
 static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
 int rc;

@@ -141,6 +133,31 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
 return rc;
 }

+int __noreturn __get_user_bad(void);
+
+#define __get_user_asm(to, from, size) \
+({ \
+union oac __oac_spec = { \
+.oac2.as = PSW_BITS_AS_SECONDARY, \
+.oac2.a = 1, \
+}; \
+int __rc; \
+\
+asm volatile( \
+" lr 0,%[spec]\n" \
+"0: mvcos 0(%[_to]),%[_from],%[_size]\n" \
+"1: xr %[rc],%[rc]\n" \
+"2:\n" \
+EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \
+EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \
+: [rc] "=&d" (__rc), "=Q" (*(to)) \
+: [_size] "d" (size), [_from] "Q" (*(from)), \
+[spec] "d" (__oac_spec.val), [_to] "a" (to), \
+[_ksize] "K" (size) \
+: "cc", "0"); \
+__rc; \
+})
+
 static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
 int rc;

@@ -177,77 +194,77 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
-#define __put_user(x, ptr) \
-({ \
-__typeof__(*(ptr)) __x = (x); \
-int __pu_err = -EFAULT; \
-__chk_user_ptr(ptr); \
-switch (sizeof (*(ptr))) { \
-case 1: \
-case 2: \
-case 4: \
-case 8: \
-__pu_err = __put_user_fn(&__x, ptr, \
-sizeof(*(ptr))); \
-break; \
-default: \
-__put_user_bad(); \
-break; \
-} \
-__builtin_expect(__pu_err, 0); \
+#define __put_user(x, ptr) \
+({ \
+__typeof__(*(ptr)) __x = (x); \
+int __pu_err = -EFAULT; \
+\
+__chk_user_ptr(ptr); \
+switch (sizeof(*(ptr))) { \
+case 1: \
+case 2: \
+case 4: \
+case 8: \
+__pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \
+break; \
+default: \
+__put_user_bad(); \
+break; \
+} \
+__builtin_expect(__pu_err, 0); \
 })

-#define put_user(x, ptr) \
-({ \
-might_fault(); \
-__put_user(x, ptr); \
+#define put_user(x, ptr) \
+({ \
+might_fault(); \
+__put_user(x, ptr); \
 })

-
-#define __get_user(x, ptr) \
-({ \
-int __gu_err = -EFAULT; \
-__chk_user_ptr(ptr); \
-switch (sizeof(*(ptr))) { \
-case 1: { \
-unsigned char __x = 0; \
-__gu_err = __get_user_fn(&__x, ptr, \
-sizeof(*(ptr))); \
-(x) = *(__force __typeof__(*(ptr)) *) &__x; \
-break; \
-}; \
-case 2: { \
-unsigned short __x = 0; \
-__gu_err = __get_user_fn(&__x, ptr, \
-sizeof(*(ptr))); \
-(x) = *(__force __typeof__(*(ptr)) *) &__x; \
-break; \
-}; \
-case 4: { \
-unsigned int __x = 0; \
-__gu_err = __get_user_fn(&__x, ptr, \
-sizeof(*(ptr))); \
-(x) = *(__force __typeof__(*(ptr)) *) &__x; \
-break; \
-}; \
-case 8: { \
-unsigned long long __x = 0; \
-__gu_err = __get_user_fn(&__x, ptr, \
-sizeof(*(ptr))); \
-(x) = *(__force __typeof__(*(ptr)) *) &__x; \
-break; \
-}; \
-default: \
-__get_user_bad(); \
-break; \
-} \
-__builtin_expect(__gu_err, 0); \
+#define __get_user(x, ptr) \
+({ \
+int __gu_err = -EFAULT; \
+\
+__chk_user_ptr(ptr); \
+switch (sizeof(*(ptr))) { \
+case 1: { \
+unsigned char __x; \
+\
+__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+(x) = *(__force __typeof__(*(ptr)) *)&__x; \
+break; \
+}; \
+case 2: { \
+unsigned short __x; \
+\
+__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+(x) = *(__force __typeof__(*(ptr)) *)&__x; \
+break; \
+}; \
+case 4: { \
+unsigned int __x; \
+\
+__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+(x) = *(__force __typeof__(*(ptr)) *)&__x; \
+break; \
+}; \
+case 8: { \
+unsigned long __x; \
+\
+__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+(x) = *(__force __typeof__(*(ptr)) *)&__x; \
+break; \
+}; \
+default: \
+__get_user_bad(); \
+break; \
+} \
+__builtin_expect(__gu_err, 0); \
 })

-#define get_user(x, ptr) \
-({ \
-might_fault(); \
-__get_user(x, ptr); \
+#define get_user(x, ptr) \
+({ \
+might_fault(); \
+__get_user(x, ptr); \
 })

 /*

@@ -278,19 +295,20 @@ int __noreturn __put_kernel_bad(void);
 int __rc; \
 \
 asm volatile( \
-"0: " insn " %2,%1\n" \
-"1: xr %0,%0\n" \
+"0: " insn " %[_val],%[_to]\n" \
+"1: xr %[rc],%[rc]\n" \
 "2:\n" \
-EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \
-: "=d" (__rc), "+Q" (*(to)) \
-: "d" (val) \
+EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
+EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
+: [rc] "=d" (__rc), [_to] "+Q" (*(to)) \
+: [_val] "d" (val) \
 : "cc"); \
 __rc; \
 })

 #define __put_kernel_nofault(dst, src, type, err_label) \
 do { \
-u64 __x = (u64)(*((type *)(src))); \
+unsigned long __x = (unsigned long)(*((type *)(src))); \
 int __pk_err; \
 \
 switch (sizeof(type)) { \

@@ -321,12 +339,13 @@ int __noreturn __get_kernel_bad(void);
 int __rc; \
 \
 asm volatile( \
-"0: " insn " %1,%2\n" \
-"1: xr %0,%0\n" \
+"0: " insn " %[_val],%[_from]\n" \
+"1: xr %[rc],%[rc]\n" \
 "2:\n" \
-EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \
-: "=d" (__rc), "+d" (val) \
-: "Q" (*(from)) \
+EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \
+EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \
+: [rc] "=d" (__rc), [_val] "=d" (val) \
+: [_from] "Q" (*(from)) \
 : "cc"); \
 __rc; \
 })

@@ -337,28 +356,28 @@ do { \
 \
 switch (sizeof(type)) { \
 case 1: { \
-u8 __x = 0; \
+unsigned char __x; \
 \
 __gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \
 *((type *)(dst)) = (type)__x; \
 break; \
 }; \
 case 2: { \
-u16 __x = 0; \
+unsigned short __x; \
 \
 __gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \
 *((type *)(dst)) = (type)__x; \
 break; \
 }; \
 case 4: { \
-u32 __x = 0; \
+unsigned int __x; \
 \
 __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \
 *((type *)(dst)) = (type)__x; \
 break; \
 }; \
 case 8: { \
-u64 __x = 0; \
+unsigned long __x; \
 \
 __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \
 *((type *)(dst)) = (type)__x; \

@@ -33,7 +33,7 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
 CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
 CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls

-obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
+obj-y := traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
 obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o
 obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o

@@ -32,6 +32,22 @@ int main(void)
 /* pt_regs offsets */
 OFFSET(__PT_PSW, pt_regs, psw);
 OFFSET(__PT_GPRS, pt_regs, gprs);
+OFFSET(__PT_R0, pt_regs, gprs[0]);
+OFFSET(__PT_R1, pt_regs, gprs[1]);
+OFFSET(__PT_R2, pt_regs, gprs[2]);
+OFFSET(__PT_R3, pt_regs, gprs[3]);
+OFFSET(__PT_R4, pt_regs, gprs[4]);
+OFFSET(__PT_R5, pt_regs, gprs[5]);
+OFFSET(__PT_R6, pt_regs, gprs[6]);
+OFFSET(__PT_R7, pt_regs, gprs[7]);
+OFFSET(__PT_R8, pt_regs, gprs[8]);
+OFFSET(__PT_R9, pt_regs, gprs[9]);
+OFFSET(__PT_R10, pt_regs, gprs[10]);
+OFFSET(__PT_R11, pt_regs, gprs[11]);
+OFFSET(__PT_R12, pt_regs, gprs[12]);
+OFFSET(__PT_R13, pt_regs, gprs[13]);
+OFFSET(__PT_R14, pt_regs, gprs[14]);
+OFFSET(__PT_R15, pt_regs, gprs[15]);
 OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
 OFFSET(__PT_FLAGS, pt_regs, flags);
 OFFSET(__PT_CR1, pt_regs, cr1);

@@ -41,11 +57,11 @@ int main(void)
 /* stack_frame offsets */
 OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
 OFFSET(__SF_GPRS, stack_frame, gprs);
-OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
-OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
-OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
-OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
-OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
+OFFSET(__SF_EMPTY, stack_frame, empty[0]);
+OFFSET(__SF_SIE_CONTROL, stack_frame, sie_control_block);
+OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
+OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
+OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
 DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
 BLANK();
 /* idle data offsets */

@@ -149,7 +149,7 @@ static __init void setup_topology(void)
 topology_max_mnest = max_mnest;
 }

-static void early_pgm_check_handler(struct pt_regs *regs)
+void __do_early_pgm_check(struct pt_regs *regs)
 {
 if (!fixup_exception(regs))
 disabled_wait();

@@ -159,12 +159,11 @@ static noinline __init void setup_lowcore_early(void)
 {
 psw_t psw;

-psw.addr = (unsigned long)s390_base_pgm_handler;
+psw.addr = (unsigned long)early_pgm_check_handler;
 psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
 if (IS_ENABLED(CONFIG_KASAN))
 psw.mask |= PSW_MASK_DAT;
 S390_lowcore.program_new_psw = psw;
-s390_base_pgm_handler_fn = early_pgm_check_handler;
 S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
 }

@@ -1,23 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
-* arch/s390/kernel/base.S
-*
 * Copyright IBM Corp. 2006, 2007
 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
 */

 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
-#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
-
-GEN_BR_THUNK %r9
-GEN_BR_THUNK %r14
-
-__PT_R0 = __PT_GPRS
-__PT_R8 = __PT_GPRS + 64
-
-ENTRY(s390_base_pgm_handler)
+ENTRY(early_pgm_check_handler)
 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
 aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
 la %r11,STACK_FRAME_OVERHEAD(%r15)

@@ -26,25 +16,8 @@ ENTRY(s390_base_pgm_handler)
 mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW
 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 lgr %r2,%r11
-larl %r1,s390_base_pgm_handler_fn
-lg %r9,0(%r1)
-ltgr %r9,%r9
-jz 1f
-BASR_EX %r14,%r9
+brasl %r14,__do_early_pgm_check
 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
 lpswe __LC_RETURN_PSW
-1: larl %r13,disabled_wait_psw
-lpswe 0(%r13)
-ENDPROC(s390_base_pgm_handler)
-
-.align 8
-disabled_wait_psw:
-.quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
-
-.section .bss
-.align 8
-.globl s390_base_pgm_handler_fn
-s390_base_pgm_handler_fn:
-.quad 0
-.previous
+ENDPROC(early_pgm_check_handler)

@@ -29,23 +29,6 @@
 #include <asm/export.h>
 #include <asm/nospec-insn.h>

-__PT_R0 = __PT_GPRS
-__PT_R1 = __PT_GPRS + 8
-__PT_R2 = __PT_GPRS + 16
-__PT_R3 = __PT_GPRS + 24
-__PT_R4 = __PT_GPRS + 32
-__PT_R5 = __PT_GPRS + 40
-__PT_R6 = __PT_GPRS + 48
-__PT_R7 = __PT_GPRS + 56
-__PT_R8 = __PT_GPRS + 64
-__PT_R9 = __PT_GPRS + 72
-__PT_R10 = __PT_GPRS + 80
-__PT_R11 = __PT_GPRS + 88
-__PT_R12 = __PT_GPRS + 96
-__PT_R13 = __PT_GPRS + 104
-__PT_R14 = __PT_GPRS + 112
-__PT_R15 = __PT_GPRS + 120
-
 STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
 STACK_SIZE = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

@@ -268,6 +251,10 @@ ENTRY(sie64a)
 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_entry:
 sie 0(%r14)
+# Let the next instruction be NOP to avoid triggering a machine check
+# and handling it in a guest as result of the instruction execution.
+nopr 7
+.Lsie_leave:
 BPOFF
 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_skip:

@@ -564,7 +551,7 @@ ENTRY(mcck_int_handler)
 jno .Lmcck_panic
 #if IS_ENABLED(CONFIG_KVM)
 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f
-OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f
+OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f
 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
 j 5f
 4: CHKSTG .Lmcck_panic

@@ -17,10 +17,12 @@ void ext_int_handler(void);
 void io_int_handler(void);
 void mcck_int_handler(void);
 void restart_int_handler(void);
+void early_pgm_check_handler(void);

 void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs);
 void __do_pgm_check(struct pt_regs *regs);
 void __do_syscall(struct pt_regs *regs, int per_trap);
+void __do_early_pgm_check(struct pt_regs *regs);

 void do_protection_exception(struct pt_regs *regs);
 void do_dat_exception(struct pt_regs *regs);

@@ -30,7 +30,7 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
 if (!stack)
 return NULL;

-return (struct kvm_s390_sie_block *) stack->empty1[0];
+return (struct kvm_s390_sie_block *)stack->sie_control_block;
 }

 static bool is_in_guest(struct pt_regs *regs)

@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0

+#include <linux/bitfield.h>
 #include <linux/extable.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <linux/panic.h>
 #include <asm/asm-extable.h>

@@ -24,9 +26,34 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex, struct pt_r
 return true;
 }

-static bool ex_handler_uaccess(const struct exception_table_entry *ex, struct pt_regs *regs)
+static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct pt_regs *regs)
 {
-regs->gprs[ex->data] = -EFAULT;
+unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+
+regs->gprs[reg_err] = -EFAULT;
 regs->psw.addr = extable_fixup(ex);
 return true;
 }
+
+static bool ex_handler_ua_load_mem(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+size_t len = FIELD_GET(EX_DATA_LEN, ex->data);
+
+regs->gprs[reg_err] = -EFAULT;
+memset((void *)regs->gprs[reg_addr], 0, len);
+regs->psw.addr = extable_fixup(ex);
+return true;
+}
+
+static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+unsigned int reg_zero = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+
+regs->gprs[reg_err] = -EFAULT;
+regs->gprs[reg_zero] = 0;
+regs->psw.addr = extable_fixup(ex);
+return true;
+}

@@ -43,8 +70,12 @@ bool fixup_exception(struct pt_regs *regs)
 return ex_handler_fixup(ex, regs);
 case EX_TYPE_BPF:
 return ex_handler_bpf(ex, regs);
-case EX_TYPE_UACCESS:
-return ex_handler_uaccess(ex, regs);
+case EX_TYPE_UA_STORE:
+return ex_handler_ua_store(ex, regs);
+case EX_TYPE_UA_LOAD_MEM:
+return ex_handler_ua_load_mem(ex, regs);
+case EX_TYPE_UA_LOAD_REG:
+return ex_handler_ua_load_reg(ex, regs);
 }
 panic("invalid exception table entry");
 }

@@ -2608,6 +2608,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
 return 0;
 }

+/*
+* Give a chance to schedule after setting a key to 256 pages.
+* We only hold the mm lock, which is a rwsem and the kvm srcu.
+* Both can sleep.
+*/
+static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
+unsigned long next, struct mm_walk *walk)
+{
+cond_resched();
+return 0;
+}
+
 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 unsigned long hmask, unsigned long next,
 struct mm_walk *walk)

@@ -2630,12 +2642,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 end = start + HPAGE_SIZE - 1;
 __storage_key_init_range(start, end);
 set_bit(PG_arch_1, &page->flags);
+cond_resched();
 return 0;
 }

 static const struct mm_walk_ops enable_skey_walk_ops = {
 .hugetlb_entry = __s390_enable_skey_hugetlb,
 .pte_entry = __s390_enable_skey_pte,
+.pmd_entry = __s390_enable_skey_pmd,
 };

 int s390_enable_skey(void)

@@ -748,7 +748,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
 ptev = pte_val(*ptep);
 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
-page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
 pgste_set_unlock(ptep, pgste);
 preempt_enable();
 }