s390 updates for the 6.4 merge window

Merge tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Add support for stackleak feature. Also allow specifying
   architecture-specific stackleak poison function to enable faster
   implementation. On s390, the mvc-based implementation helps decrease
   typical overhead from a factor of 3 to just 25%

 - Convert all assembler files to use SYM* style macros, deprecating the
   ENTRY() macro and other annotations. Select ARCH_USE_SYM_ANNOTATIONS

 - Improve KASLR to also randomize module and special amode31 code base
   load addresses

 - Rework decompressor memory tracking to support memory holes and
   improve error handling

 - Add support for protected virtualization AP binding

 - Add support for set_direct_map() calls

 - Implement set_memory_rox() and noexec module_alloc()

 - Remove obsolete overriding of mem*() functions for KASAN

 - Rework kexec/kdump to avoid using nodat_stack to call purgatory

 - Convert the rest of the s390 code to use flexible-array member
   instead of a zero-length array

 - Clean up uaccess inline asm

 - Enable ARCH_HAS_MEMBARRIER_SYNC_CORE

 - Convert to using CONFIG_FUNCTION_ALIGNMENT and enable
   DEBUG_FORCE_FUNCTION_ALIGN_64B

 - Resolve last_break in userspace fault reports

 - Simplify one-level sysctl registration

 - Clean up branch prediction handling

 - Rework CPU counter facility to retrieve available counter sets just
   once

 - Other various small fixes and improvements all over the code

* tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (118 commits)
  s390/stackleak: provide fast __stackleak_poison() implementation
  stackleak: allow to specify arch specific stackleak poison function
  s390: select ARCH_USE_SYM_ANNOTATIONS
  s390/mm: use VM_FLUSH_RESET_PERMS in module_alloc()
  s390: wire up memfd_secret system call
  s390/mm: enable ARCH_HAS_SET_DIRECT_MAP
  s390/mm: use BIT macro to generate SET_MEMORY bit masks
  s390/relocate_kernel: adjust indentation
  s390/relocate_kernel: use SYM* macros instead of ENTRY(), etc.
  s390/entry: use SYM* macros instead of ENTRY(), etc.
  s390/purgatory: use SYM* macros instead of ENTRY(), etc.
  s390/kprobes: use SYM* macros instead of ENTRY(), etc.
  s390/reipl: use SYM* macros instead of ENTRY(), etc.
  s390/head64: use SYM* macros instead of ENTRY(), etc.
  s390/earlypgm: use SYM* macros instead of ENTRY(), etc.
  s390/mcount: use SYM* macros instead of ENTRY(), etc.
  s390/crc32le: use SYM* macros instead of ENTRY(), etc.
  s390/crc32be: use SYM* macros instead of ENTRY(), etc.
  s390/crypto,chacha: use SYM* macros instead of ENTRY(), etc.
  s390/amode31: use SYM* macros instead of ENTRY(), etc.
  ...
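The first two shortlog entries implement the stackleak speedup described above: the generic eraser in kernel/stackleak.c poisons the used stack word by word unless the architecture supplies its own routine. A sketch of the fallback shape (not copied from this merge; s390 pre-defines __stackleak_poison with an mvc-based block move instead):

  /* Generic fallback: poison one unsigned long at a time. If the arch
   * defines __stackleak_poison beforehand, this default is compiled out. */
  #ifndef __stackleak_poison
  static __always_inline void __stackleak_poison(unsigned long erase_low,
						 unsigned long erase_high,
						 unsigned long poison)
  {
	while (erase_low < erase_high) {
		*(unsigned long *)erase_low = poison;
		erase_low += sizeof(unsigned long);
	}
  }
  #endif

Replacing the per-word store loop with a single block move is what brings the typical overhead down from a factor of 3 to about 25% on s390.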
This commit is contained in: commit 10de638d8e
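Several hunks in the diff below revolve around one small helper: the decompressor rework tracks online and reserved physical memory as [addr, addr + size) ranges and tests overlap with the intersects() check visible in the boot.h hunk. A standalone illustration of that predicate (example values invented):

  #include <stdbool.h>
  #include <stdio.h>

  /* Same expression as intersects() in boot.h below: two half-open
   * ranges overlap iff each one starts before the other one ends. */
  static bool intersects(unsigned long addr0, unsigned long size0,
			 unsigned long addr1, unsigned long size1)
  {
	return addr0 + size0 > addr1 && addr1 + size1 > addr0;
  }

  int main(void)
  {
	printf("%d\n", intersects(0x1000, 0x1000, 0x1800, 0x1000)); /* 1: overlap */
	printf("%d\n", intersects(0x1000, 0x1000, 0x2000, 0x1000)); /* 0: merely adjacent */
	return 0;
  }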
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -5,7 +5,7 @@
 #
 # Architecture requirements
 #
-# * arm/arm64/powerpc
+# * arm/arm64/powerpc/s390
 #
 # Rely on implicit context synchronization as a result of exception return
 # when returning from IPI handler, and when returning to user-space.
@@ -45,7 +45,7 @@
 | parisc: | TODO |
 | powerpc: | ok |
 | riscv: | TODO |
-| s390: | TODO |
+| s390: | ok |
 | sh: | TODO |
 | sparc: | TODO |
 | um: | TODO |
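The flipped arch-support entry corresponds to the sync-core membarrier commands, which user space can now rely on on s390. A minimal usage sketch (standard membarrier(2) interface, not part of this diff):

  #include <linux/membarrier.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <stdio.h>

  /* glibc has no wrapper; issue the raw syscall. */
  static long membarrier(int cmd, unsigned int flags, int cpu_id)
  {
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
  }

  int main(void)
  {
	/* A process must register before using the private expedited command. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0))
		perror("register");
	/* Serializes instruction streams on all threads of this process. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0))
		perror("membarrier");
	return 0;
  }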
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
@@ -26,10 +26,6 @@ config GENERIC_BUG
 config GENERIC_BUG_RELATIVE_POINTERS
	def_bool y
 
-config GENERIC_CSUM
-	bool
-	default y if KASAN
-
 config GENERIC_LOCKBREAK
	def_bool y if PREEMPTION
 
@@ -76,10 +72,12 @@ config S390
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_GIGANTIC_PAGE
	select ARCH_HAS_KCOV
+	select ARCH_HAS_MEMBARRIER_SYNC_CORE
	select ARCH_HAS_MEM_ENCRYPT
	select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
	select ARCH_HAS_PTE_SPECIAL
	select ARCH_HAS_SCALED_CPUTIME
+	select ARCH_HAS_SET_DIRECT_MAP
	select ARCH_HAS_SET_MEMORY
	select ARCH_HAS_STRICT_KERNEL_RWX
	select ARCH_HAS_STRICT_MODULE_RWX
@@ -123,6 +121,7 @@ config S390
	select ARCH_SUPPORTS_PER_VMA_LOCK
	select ARCH_USE_BUILTIN_BSWAP
	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_SYM_ANNOTATIONS
	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
	select ARCH_WANTS_NO_INSTR
	select ARCH_WANT_DEFAULT_BPF_JIT
@@ -132,6 +131,8 @@ config S390
	select CLONE_BACKWARDS2
	select DMA_OPS if PCI
	select DYNAMIC_FTRACE if FUNCTION_TRACER
+	select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
+	select FUNCTION_ALIGNMENT_16B if !CC_IS_GCC
	select GCC12_NO_ARRAY_BOUNDS
	select GENERIC_ALLOCATOR
	select GENERIC_CPU_AUTOPROBE
@@ -153,6 +154,7 @@ config S390
	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ARCH_SOFT_DIRTY
+	select HAVE_ARCH_STACKLEAK
	select HAVE_ARCH_TRACEHOOK
	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select HAVE_ARCH_VMAP_STACK
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
@@ -66,16 +66,6 @@ static struct ctl_table appldata_table[] = {
	{ },
 };
 
-static struct ctl_table appldata_dir_table[] = {
-	{
-		.procname = appldata_proc_name,
-		.maxlen = 0,
-		.mode = S_IRUGO | S_IXUGO,
-		.child = appldata_table,
-	},
-	{ },
-};
-
 /*
  * Timer
  */
@@ -291,7 +281,7 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
-		if (&tmp_ops->ctl_table[2] == ctl) {
+		if (&tmp_ops->ctl_table[0] == ctl) {
			found = 1;
		}
	}
@@ -361,7 +351,8 @@ int appldata_register_ops(struct appldata_ops *ops)
	if (ops->size > APPLDATA_MAX_REC_SIZE)
		return -EINVAL;
 
-	ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL);
+	/* The last entry must be an empty one */
+	ops->ctl_table = kcalloc(2, sizeof(struct ctl_table), GFP_KERNEL);
	if (!ops->ctl_table)
		return -ENOMEM;
 
@@ -369,17 +360,12 @@ int appldata_register_ops(struct appldata_ops *ops)
	list_add(&ops->list, &appldata_ops_list);
	mutex_unlock(&appldata_ops_mutex);
 
-	ops->ctl_table[0].procname = appldata_proc_name;
-	ops->ctl_table[0].maxlen = 0;
-	ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
-	ops->ctl_table[0].child = &ops->ctl_table[2];
-
-	ops->ctl_table[2].procname = ops->name;
-	ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
-	ops->ctl_table[2].proc_handler = appldata_generic_handler;
-	ops->ctl_table[2].data = ops;
+	ops->ctl_table[0].procname = ops->name;
+	ops->ctl_table[0].mode = S_IRUGO | S_IWUSR;
+	ops->ctl_table[0].proc_handler = appldata_generic_handler;
+	ops->ctl_table[0].data = ops;
 
-	ops->sysctl_header = register_sysctl_table(ops->ctl_table);
+	ops->sysctl_header = register_sysctl(appldata_proc_name, ops->ctl_table);
	if (!ops->sysctl_header)
		goto out;
	return 0;
@@ -422,7 +408,7 @@ static int __init appldata_init(void)
	appldata_wq = alloc_ordered_workqueue("appldata", 0);
	if (!appldata_wq)
		return -ENOMEM;
-	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
+	appldata_sysctl_header = register_sysctl(appldata_proc_name, appldata_table);
	return 0;
 }
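The appldata hunks above are one instance of the "simplify one-level sysctl registration" item: the directory table wired up via .child is gone, and register_sysctl() creates the parent directory from a path string. The pattern in isolation, as a sketch (names are hypothetical, kernel-module context assumed):

  #include <linux/sysctl.h>
  #include <linux/init.h>
  #include <linux/errno.h>

  static int example_val;

  /* Only the leaf entries are described; the directory component is
   * created by register_sysctl() itself from the path argument. */
  static struct ctl_table example_table[] = {
	{
		.procname	= "example",
		.data		= &example_val,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* the last entry must be an empty one */
  };

  static struct ctl_table_header *example_header;

  static int __init example_init(void)
  {
	example_header = register_sysctl("example_dir", example_table);
	return example_header ? 0 : -ENOMEM;
  }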
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
@@ -35,7 +35,7 @@ endif
 
 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
 
-obj-y	:= head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o vmem.o
+obj-y	:= head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
 obj-y	+= string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
 obj-y	+= version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
@@ -8,6 +8,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/physmem_info.h>
+
 struct machine_info {
	unsigned char has_edat1 : 1;
	unsigned char has_edat2 : 1;
@@ -30,24 +32,46 @@ struct vmlinux_info {
	unsigned long init_mm_off;
	unsigned long swapper_pg_dir_off;
	unsigned long invalid_pg_dir_off;
+#ifdef CONFIG_KASAN
+	unsigned long kasan_early_shadow_page_off;
+	unsigned long kasan_early_shadow_pte_off;
+	unsigned long kasan_early_shadow_pmd_off;
+	unsigned long kasan_early_shadow_pud_off;
+	unsigned long kasan_early_shadow_p4d_off;
+#endif
 };
 
 void startup_kernel(void);
-unsigned long detect_memory(unsigned long *safe_addr);
-void mem_detect_set_usable_limit(unsigned long limit);
+unsigned long detect_max_physmem_end(void);
+void detect_physmem_online_ranges(unsigned long max_physmem_end);
+void physmem_set_usable_limit(unsigned long limit);
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size);
+void physmem_free(enum reserved_range_type type);
+/* for continuous/multiple allocations per type */
+unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
+				     unsigned long align);
+/* for single allocations, 1 per type */
+unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
+				  unsigned long align, unsigned long min, unsigned long max,
+				  bool die_on_oom);
+unsigned long get_physmem_alloc_pos(void);
+bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
+				 unsigned long *intersection_start);
 bool is_ipl_block_dump(void);
 void store_ipl_parmblock(void);
-unsigned long read_ipl_report(unsigned long safe_addr);
+int read_ipl_report(void);
+void save_ipl_cert_comp_list(void);
 void setup_boot_command_line(void);
 void parse_boot_command_line(void);
 void verify_facilities(void);
 void print_missing_facilities(void);
 void sclp_early_setup_buffer(void);
 void print_pgm_check_info(void);
-unsigned long get_random_base(unsigned long safe_addr);
+unsigned long randomize_within_range(unsigned long size, unsigned long align,
+				     unsigned long min, unsigned long max);
 void setup_vmem(unsigned long asce_limit);
 unsigned long vmem_estimate_memory_needs(unsigned long online_mem_total);
 void __printf(1, 2) decompressor_printk(const char *fmt, ...);
+void print_stacktrace(unsigned long sp);
 void error(char *m);
 
 extern struct machine_info machine;
@@ -57,12 +81,11 @@ extern const char kernel_version[];
 extern unsigned long memory_limit;
 extern unsigned long vmalloc_size;
 extern int vmalloc_size_set;
-extern int kaslr_enabled;
 extern char __boot_data_start[], __boot_data_end[];
 extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
 extern char _decompressor_syms_start[], _decompressor_syms_end[];
 extern char _stack_start[], _stack_end[];
-extern char _end[];
+extern char _end[], _decompressor_end[];
 extern unsigned char _compressed_start[];
 extern unsigned char _compressed_end[];
 extern struct vmlinux_info _vmlinux_info;
@@ -70,5 +93,10 @@ extern struct vmlinux_info _vmlinux_info;
 
 #define __abs_lowcore_pa(x)	(((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
 
+static inline bool intersects(unsigned long addr0, unsigned long size0,
+			      unsigned long addr1, unsigned long size1)
+{
+	return addr0 + size0 > addr1 && addr1 + size1 > addr0;
+}
 #endif /* __ASSEMBLY__ */
 #endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
@@ -17,8 +17,8 @@
 
 echo "Warning: '${INSTALLKERNEL}' command not available - additional " \
      "bootloader config required" >&2
-if [ -f $4/vmlinuz-$1 ]; then mv $4/vmlinuz-$1 $4/vmlinuz-$1.old; fi
-if [ -f $4/System.map-$1 ]; then mv $4/System.map-$1 $4/System.map-$1.old; fi
+if [ -f "$4/vmlinuz-$1" ]; then mv -- "$4/vmlinuz-$1" "$4/vmlinuz-$1.old"; fi
+if [ -f "$4/System.map-$1" ]; then mv -- "$4/System.map-$1" "$4/System.map-$1.old"; fi
 
-cat $2 > $4/vmlinuz-$1
-cp $3 $4/System.map-$1
+cat -- "$2" > "$4/vmlinuz-$1"
+cp -- "$3" "$4/System.map-$1"
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
@@ -24,11 +24,11 @@ int __bootdata(noexec_disabled);
 unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
 struct ipl_parameter_block __bootdata_preserved(ipl_block);
 int __bootdata_preserved(ipl_block_valid);
+int __bootdata_preserved(__kaslr_enabled);
 
 unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
 unsigned long memory_limit;
 int vmalloc_size_set;
-int kaslr_enabled;
 
 static inline int __diag308(unsigned long subcode, void *addr)
 {
@@ -264,7 +264,7 @@ void parse_boot_command_line(void)
	char *args;
	int rc;
 
-	kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
+	__kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
	args = strcpy(command_line_buf, early_command_line);
	while (*args) {
		args = next_arg(args, &param, &val);
@@ -300,7 +300,7 @@ void parse_boot_command_line(void)
			modify_fac_list(val);
 
		if (!strcmp(param, "nokaslr"))
-			kaslr_enabled = 0;
+			__kaslr_enabled = 0;
 
 #if IS_ENABLED(CONFIG_KVM)
		if (!strcmp(param, "prot_virt")) {
diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
@@ -5,6 +5,7 @@
 #include <asm/sclp.h>
 #include <asm/sections.h>
 #include <asm/boot_data.h>
+#include <asm/physmem_info.h>
 #include <uapi/asm/ipl.h>
 #include "boot.h"
 
@@ -16,20 +17,16 @@ unsigned long __bootdata_preserved(ipl_cert_list_size);
 unsigned long __bootdata(early_ipl_comp_list_addr);
 unsigned long __bootdata(early_ipl_comp_list_size);
 
+static struct ipl_rb_certificates *certs;
+static struct ipl_rb_components *comps;
+static bool ipl_report_needs_saving;
+
 #define for_each_rb_entry(entry, rb)			\
	for (entry = rb->entries;			\
	     (void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
	     entry++)
 
-static inline bool intersects(unsigned long addr0, unsigned long size0,
-			      unsigned long addr1, unsigned long size1)
-{
-	return addr0 + size0 > addr1 && addr1 + size1 > addr0;
-}
-
-static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
-					 struct ipl_rb_certificates *certs,
-					 unsigned long safe_addr)
+static unsigned long get_cert_comp_list_size(void)
 {
	struct ipl_rb_certificate_entry *cert;
	struct ipl_rb_component_entry *comp;
@@ -44,44 +41,27 @@ static unsigned long get_cert_comp_list_size(void)
	ipl_cert_list_size = 0;
	for_each_rb_entry(cert, certs)
		ipl_cert_list_size += sizeof(unsigned int) + cert->len;
-	size = ipl_cert_list_size + early_ipl_comp_list_size;
-
-	/*
-	 * Start from safe_addr to find a free memory area large
-	 * enough for the IPL report boot data. This area is used
-	 * for ipl_cert_list_addr/ipl_cert_list_size and
-	 * early_ipl_comp_list_addr/early_ipl_comp_list_size. It must
-	 * not overlap with any component or any certificate.
-	 */
-repeat:
-	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
-	    intersects(initrd_data.start, initrd_data.size, safe_addr, size))
-		safe_addr = initrd_data.start + initrd_data.size;
-	if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
-		safe_addr = (unsigned long)comps + comps->len;
-		goto repeat;
-	}
-	for_each_rb_entry(comp, comps)
-		if (intersects(safe_addr, size, comp->addr, comp->len)) {
-			safe_addr = comp->addr + comp->len;
-			goto repeat;
-		}
-	if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
-		safe_addr = (unsigned long)certs + certs->len;
-		goto repeat;
-	}
-	for_each_rb_entry(cert, certs)
-		if (intersects(safe_addr, size, cert->addr, cert->len)) {
-			safe_addr = cert->addr + cert->len;
-			goto repeat;
-		}
-	early_ipl_comp_list_addr = safe_addr;
-	ipl_cert_list_addr = safe_addr + early_ipl_comp_list_size;
-
-	return safe_addr + size;
+	return ipl_cert_list_size + early_ipl_comp_list_size;
 }
 
-static void copy_components_bootdata(struct ipl_rb_components *comps)
+bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
+				 unsigned long *intersection_start)
+{
+	struct ipl_rb_certificate_entry *cert;
+
+	if (!ipl_report_needs_saving)
+		return false;
+
+	for_each_rb_entry(cert, certs) {
+		if (intersects(addr, size, cert->addr, cert->len)) {
+			*intersection_start = cert->addr;
+			return true;
+		}
+	}
+	return false;
+}
+
+static void copy_components_bootdata(void)
 {
	struct ipl_rb_component_entry *comp, *ptr;
 
@@ -90,7 +70,7 @@ static void copy_components_bootdata(void)
		memcpy(ptr++, comp, sizeof(*ptr));
 }
 
-static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
+static void copy_certificates_bootdata(void)
 {
	struct ipl_rb_certificate_entry *cert;
	void *ptr;
@@ -104,10 +84,8 @@ static void copy_certificates_bootdata(void)
	}
 }
 
-unsigned long read_ipl_report(unsigned long safe_addr)
+int read_ipl_report(void)
 {
-	struct ipl_rb_certificates *certs;
-	struct ipl_rb_components *comps;
	struct ipl_pl_hdr *pl_hdr;
	struct ipl_rl_hdr *rl_hdr;
	struct ipl_rb_hdr *rb_hdr;
@@ -120,7 +98,7 @@ int read_ipl_report(void)
	 */
	if (!ipl_block_valid ||
	    !(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
-		return safe_addr;
+		return -1;
	ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
	/*
	 * There is an IPL report, to find it load the pointer to the
@@ -158,16 +136,30 @@ int read_ipl_report(void)
	 * With either the component list or the certificate list
	 * missing the kernel will stay ignorant of secure IPL.
	 */
-	if (!comps || !certs)
-		return safe_addr;
+	if (!comps || !certs) {
+		certs = NULL;
+		return -1;
+	}
 
-	/*
-	 * Copy component and certificate list to a safe area
-	 * where the decompressed kernel can find them.
-	 */
-	safe_addr = find_bootdata_space(comps, certs, safe_addr);
-	copy_components_bootdata(comps);
-	copy_certificates_bootdata(certs);
-
-	return safe_addr;
+	ipl_report_needs_saving = true;
+	physmem_reserve(RR_IPLREPORT, (unsigned long)pl_hdr,
+			(unsigned long)rl_end - (unsigned long)pl_hdr);
+	return 0;
+}
+
+void save_ipl_cert_comp_list(void)
+{
+	unsigned long size;
+
+	if (!ipl_report_needs_saving)
+		return;
+
+	size = get_cert_comp_list_size();
+	early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int));
+	ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;
+
+	copy_components_bootdata();
+	copy_certificates_bootdata();
+	physmem_free(RR_IPLREPORT);
+	ipl_report_needs_saving = false;
 }
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
@@ -3,7 +3,7 @@
  * Copyright IBM Corp. 2019
  */
 #include <linux/pgtable.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/cpacf.h>
 #include <asm/timex.h>
 #include <asm/sclp.h>
@@ -91,113 +91,108 @@ static int get_random(unsigned long limit, unsigned long *value)
	return 0;
 }
 
+static void sort_reserved_ranges(struct reserved_range *res, unsigned long size)
+{
+	struct reserved_range tmp;
+	int i, j;
+
+	for (i = 1; i < size; i++) {
+		tmp = res[i];
+		for (j = i - 1; j >= 0 && res[j].start > tmp.start; j--)
+			res[j + 1] = res[j];
+		res[j + 1] = tmp;
+	}
+}
+
+static unsigned long iterate_valid_positions(unsigned long size, unsigned long align,
+					     unsigned long _min, unsigned long _max,
+					     struct reserved_range *res, size_t res_count,
+					     bool pos_count, unsigned long find_pos)
+{
+	unsigned long start, end, tmp_end, range_pos, pos = 0;
+	struct reserved_range *res_end = res + res_count;
+	struct reserved_range *skip_res;
+	int i;
+
+	align = max(align, 8UL);
+	_min = round_up(_min, align);
+	for_each_physmem_usable_range(i, &start, &end) {
+		if (_min >= end)
+			continue;
+		start = round_up(start, align);
+		if (start >= _max)
+			break;
+		start = max(_min, start);
+		end = min(_max, end);
+
+		while (start + size <= end) {
+			/* skip reserved ranges below the start */
+			while (res && res->end <= start) {
+				res++;
+				if (res >= res_end)
+					res = NULL;
+			}
+			skip_res = NULL;
+			tmp_end = end;
+			/* has intersecting reserved range */
+			if (res && res->start < end) {
+				skip_res = res;
+				tmp_end = res->start;
+			}
+			if (start + size <= tmp_end) {
+				range_pos = (tmp_end - start - size) / align + 1;
+				if (pos_count) {
+					pos += range_pos;
+				} else {
+					if (range_pos >= find_pos)
+						return start + (find_pos - 1) * align;
+					find_pos -= range_pos;
+				}
+			}
+			if (!skip_res)
+				break;
+			start = round_up(skip_res->end, align);
+		}
+	}
+
+	return pos_count ? pos : 0;
+}
+
 /*
- * To randomize kernel base address we have to consider several facts:
- * 1. physical online memory might not be continuous and have holes. mem_detect
- *    info contains list of online memory ranges we should consider.
- * 2. we have several memory regions which are occupied and we should not
- *    overlap and destroy them. Currently safe_addr tells us the border below
- *    which all those occupied regions are. We are safe to use anything above
- *    safe_addr.
- * 3. the upper limit might apply as well, even if memory above that limit is
- *    online. Currently those limitations are:
- *    3.1. Limit set by "mem=" kernel command line option
- *    3.2. memory reserved at the end for kasan initialization.
- * 4. kernel base address must be aligned to THREAD_SIZE (kernel stack size).
- *    Which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE is 4 pages
- *    (16 pages when the kernel is built with kasan enabled)
- * Assumptions:
- * 1. kernel size (including .bss size) and upper memory limit are page aligned.
- * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
- *    aligned (in practice memory configurations granularity on z/VM and LPAR
- *    is 1mb).
+ * Two types of decompressor memory allocations/reserves are considered
+ * differently.
  *
- * To guarantee uniform distribution of kernel base address among all suitable
- * addresses we generate random value just once. For that we need to build a
- * continuous range in which every value would be suitable. We can build this
- * range by simply counting all suitable addresses (let's call them positions)
- * which would be valid as kernel base address. To count positions we iterate
- * over online memory ranges. For each range which is big enough for the
- * kernel image we count all suitable addresses we can put the kernel image at
- * that is
- * (end - start - kernel_size) / THREAD_SIZE + 1
- * Two functions count_valid_kernel_positions and position_to_address help
- * to count positions in memory range given and then convert position back
- * to address.
+ * "Static" or "single" allocations are done via physmem_alloc_range() and
+ * physmem_reserve(), and they are listed in physmem_info.reserved[]. Each
+ * type of "static" allocation can only have one allocation per type and
+ * cannot have chains.
+ *
+ * On the other hand, "dynamic" or "repetitive" allocations are done via
+ * physmem_alloc_top_down(). These allocations are tightly packed together
+ * top down from the end of online memory. physmem_alloc_pos represents
+ * current position where those allocations start.
+ *
+ * Functions randomize_within_range() and iterate_valid_positions()
+ * only consider "dynamic" allocations by never looking above
+ * physmem_alloc_pos. "Static" allocations, however, are explicitly
+ * considered by checking the "res" (reserves) array. The first
+ * reserved_range of a "dynamic" allocation may also be checked along the
+ * way, but it will always be above the maximum value anyway.
  */
-static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
-						  unsigned long _min,
-						  unsigned long _max)
+unsigned long randomize_within_range(unsigned long size, unsigned long align,
+				     unsigned long min, unsigned long max)
 {
-	unsigned long start, end, pos = 0;
-	int i;
+	struct reserved_range res[RR_MAX];
+	unsigned long max_pos, pos;
 
-	for_each_mem_detect_usable_block(i, &start, &end) {
-		if (_min >= end)
-			continue;
-		if (start >= _max)
-			break;
-		start = max(_min, start);
-		end = min(_max, end);
-		if (end - start < kernel_size)
-			continue;
-		pos += (end - start - kernel_size) / THREAD_SIZE + 1;
-	}
+	memcpy(res, physmem_info.reserved, sizeof(res));
+	sort_reserved_ranges(res, ARRAY_SIZE(res));
+	max = min(max, get_physmem_alloc_pos());
 
-	return pos;
-}
-
-static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
-					 unsigned long _min, unsigned long _max)
-{
-	unsigned long start, end;
-	int i;
-
-	for_each_mem_detect_usable_block(i, &start, &end) {
-		if (_min >= end)
-			continue;
-		if (start >= _max)
-			break;
-		start = max(_min, start);
-		end = min(_max, end);
-		if (end - start < kernel_size)
-			continue;
-		if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
-			return start + (pos - 1) * THREAD_SIZE;
-		pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
-	}
-
-	return 0;
-}
-
-unsigned long get_random_base(unsigned long safe_addr)
-{
-	unsigned long usable_total = get_mem_detect_usable_total();
-	unsigned long memory_limit = get_mem_detect_end();
-	unsigned long base_pos, max_pos, kernel_size;
-	int i;
-
-	/*
-	 * Avoid putting kernel in the end of physical memory
-	 * which vmem and kasan code will use for shadow memory and
-	 * pgtable mapping allocations.
-	 */
-	memory_limit -= kasan_estimate_memory_needs(usable_total);
-	memory_limit -= vmem_estimate_memory_needs(usable_total);
-
-	safe_addr = ALIGN(safe_addr, THREAD_SIZE);
-	kernel_size = vmlinux.image_size + vmlinux.bss_size;
-	if (safe_addr + kernel_size > memory_limit)
+	max_pos = iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), true, 0);
+	if (!max_pos)
		return 0;
-
-	max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
-	if (!max_pos) {
-		sclp_early_printk("KASLR disabled: not enough memory\n");
+	if (get_random(max_pos, &pos))
		return 0;
-	}
-
-	/* we need a value in the range [1, base_pos] inclusive */
-	if (get_random(max_pos, &base_pos))
-		return 0;
-	return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
+	return iterate_valid_positions(size, align, min, max, res, ARRAY_SIZE(res), false, pos + 1);
 }
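The position counting in iterate_valid_positions() above enumerates every align-spaced base address that fits an image into a usable gap, contributing (end - start - size) / align + 1 positions per gap. A standalone arithmetic check with made-up numbers:

  #include <stdio.h>

  /* Worked example of the per-gap position formula; values are
   * illustrative only, not taken from the patch. */
  int main(void)
  {
	unsigned long start = 0x100000, end = 0x500000;
	unsigned long size = 0x200000, align = 0x10000;
	unsigned long positions = (end - start - size) / align + 1;

	printf("%lu candidate base addresses\n", positions); /* 33 */
	return 0;
  }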
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
deleted file mode 100644
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <asm/setup.h>
-#include <asm/processor.h>
-#include <asm/sclp.h>
-#include <asm/sections.h>
-#include <asm/mem_detect.h>
-#include <asm/sparsemem.h>
-#include "decompressor.h"
-#include "boot.h"
-
-struct mem_detect_info __bootdata(mem_detect);
-
-/* up to 256 storage elements, 1020 subincrements each */
-#define ENTRIES_EXTENDED_MAX \
-	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
-
-static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
-{
-	if (n < MEM_INLINED_ENTRIES)
-		return &mem_detect.entries[n];
-	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
-}
-
-/*
- * sequential calls to add_mem_detect_block with adjacent memory areas
- * are merged together into single memory block.
- */
-void add_mem_detect_block(u64 start, u64 end)
-{
-	struct mem_detect_block *block;
-
-	if (mem_detect.count) {
-		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
-		if (block->end == start) {
-			block->end = end;
-			return;
-		}
-	}
-
-	block = __get_mem_detect_block_ptr(mem_detect.count);
-	block->start = start;
-	block->end = end;
-	mem_detect.count++;
-}
-
-static int __diag260(unsigned long rx1, unsigned long rx2)
-{
-	unsigned long reg1, reg2, ry;
-	union register_pair rx;
-	psw_t old;
-	int rc;
-
-	rx.even = rx1;
-	rx.odd = rx2;
-	ry = 0x10; /* storage configuration */
-	rc = -1; /* fail */
-	asm volatile(
-		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
-		"	epsw	%[reg1],%[reg2]\n"
-		"	st	%[reg1],0(%[psw_pgm])\n"
-		"	st	%[reg2],4(%[psw_pgm])\n"
-		"	larl	%[reg1],1f\n"
-		"	stg	%[reg1],8(%[psw_pgm])\n"
-		"	diag	%[rx],%[ry],0x260\n"
-		"	ipm	%[rc]\n"
-		"	srl	%[rc],28\n"
-		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
-		: [reg1] "=&d" (reg1),
-		  [reg2] "=&a" (reg2),
-		  [rc] "+&d" (rc),
-		  [ry] "+&d" (ry),
-		  "+Q" (S390_lowcore.program_new_psw),
-		  "=Q" (old)
-		: [rx] "d" (rx.pair),
-		  [psw_old] "a" (&old),
-		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
-		: "cc", "memory");
-	return rc == 0 ? ry : -1;
-}
-
-static int diag260(void)
-{
-	int rc, i;
-
-	struct {
-		unsigned long start;
-		unsigned long end;
-	} storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
-
-	memset(storage_extents, 0, sizeof(storage_extents));
-	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
-	if (rc == -1)
-		return -1;
-
-	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
-		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
-	return 0;
-}
-
-static int tprot(unsigned long addr)
-{
-	unsigned long reg1, reg2;
-	int rc = -EFAULT;
-	psw_t old;
-
-	asm volatile(
-		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
-		"	epsw	%[reg1],%[reg2]\n"
-		"	st	%[reg1],0(%[psw_pgm])\n"
-		"	st	%[reg2],4(%[psw_pgm])\n"
-		"	larl	%[reg1],1f\n"
-		"	stg	%[reg1],8(%[psw_pgm])\n"
-		"	tprot	0(%[addr]),0\n"
-		"	ipm	%[rc]\n"
-		"	srl	%[rc],28\n"
-		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
-		: [reg1] "=&d" (reg1),
-		  [reg2] "=&a" (reg2),
-		  [rc] "+&d" (rc),
-		  "=Q" (S390_lowcore.program_new_psw.addr),
-		  "=Q" (old)
-		: [psw_old] "a" (&old),
-		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
-		  [addr] "a" (addr)
-		: "cc", "memory");
-	return rc;
-}
-
-static unsigned long search_mem_end(void)
-{
-	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
-	unsigned long offset = 0;
-	unsigned long pivot;
-
-	while (range > 1) {
-		range >>= 1;
-		pivot = offset + range;
-		if (!tprot(pivot << 20))
-			offset = pivot;
-	}
-	return (offset + 1) << 20;
-}
-
-unsigned long detect_memory(unsigned long *safe_addr)
-{
-	unsigned long max_physmem_end = 0;
-
-	sclp_early_get_memsize(&max_physmem_end);
-	mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
-
-	if (!sclp_early_read_storage_info()) {
-		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
-	} else if (!diag260()) {
-		mem_detect.info_source = MEM_DETECT_DIAG260;
-		max_physmem_end = max_physmem_end ?: get_mem_detect_end();
-	} else if (max_physmem_end) {
-		add_mem_detect_block(0, max_physmem_end);
-		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
-	} else {
-		max_physmem_end = search_mem_end();
-		add_mem_detect_block(0, max_physmem_end);
-		mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
-	}
-
-	if (mem_detect.count > MEM_INLINED_ENTRIES) {
-		*safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
-			      sizeof(struct mem_detect_block);
-	}
-
-	return max_physmem_end;
-}
-
-void mem_detect_set_usable_limit(unsigned long limit)
-{
-	struct mem_detect_block *block;
-	int i;
-
-	/* make sure mem_detect.usable ends up within online memory block */
-	for (i = 0; i < mem_detect.count; i++) {
-		block = __get_mem_detect_block_ptr(i);
-		if (block->start >= limit)
-			break;
-		if (block->end >= limit) {
-			mem_detect.usable = limit;
-			break;
-		}
-		mem_detect.usable = block->end;
-	}
-}
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
@@ -123,11 +123,10 @@ out:
	sclp_early_printk(buf);
 }
 
-static noinline void print_stacktrace(void)
+void print_stacktrace(unsigned long sp)
 {
	struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
					 (unsigned long)_stack_end };
-	unsigned long sp = S390_lowcore.gpregs_save_area[15];
	bool first = true;
 
	decompressor_printk("Call Trace:\n");
@@ -154,7 +153,7 @@ void print_pgm_check_info(void)
	decompressor_printk("Kernel command line: %s\n", early_command_line);
	decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
			    S390_lowcore.pgm_code, S390_lowcore.pgm_ilc >> 1);
-	if (kaslr_enabled)
+	if (kaslr_enabled())
		decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
	decompressor_printk("PSW : %016lx %016lx (%pS)\n",
			    S390_lowcore.psw_save_area.mask,
@@ -173,7 +172,7 @@ void print_pgm_check_info(void)
			    gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
	decompressor_printk(" %016lx %016lx %016lx %016lx\n",
			    gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
-	print_stacktrace();
+	print_stacktrace(S390_lowcore.gpregs_save_area[15]);
	decompressor_printk("Last Breaking-Event-Address:\n");
	decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.pgm_last_break,
			    (void *)S390_lowcore.pgm_last_break);
diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c
new file mode 100644
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/processor.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/physmem_info.h>
+#include <asm/stacktrace.h>
+#include <asm/boot_data.h>
+#include <asm/sparsemem.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sclp.h>
+#include <asm/uv.h>
+#include "decompressor.h"
+#include "boot.h"
+
+struct physmem_info __bootdata(physmem_info);
+static unsigned int physmem_alloc_ranges;
+static unsigned long physmem_alloc_pos;
+
+/* up to 256 storage elements, 1020 subincrements each */
+#define ENTRIES_EXTENDED_MAX \
+	(256 * (1020 / 2) * sizeof(struct physmem_range))
+
+static struct physmem_range *__get_physmem_range_ptr(u32 n)
+{
+	if (n < MEM_INLINED_ENTRIES)
+		return &physmem_info.online[n];
+	if (unlikely(!physmem_info.online_extended)) {
+		physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
+			RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
+			physmem_alloc_pos, true);
+	}
+	return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
+}
+
+/*
+ * sequential calls to add_physmem_online_range with adjacent memory ranges
+ * are merged together into single memory range.
+ */
+void add_physmem_online_range(u64 start, u64 end)
+{
+	struct physmem_range *range;
+
+	if (physmem_info.range_count) {
+		range = __get_physmem_range_ptr(physmem_info.range_count - 1);
+		if (range->end == start) {
+			range->end = end;
+			return;
+		}
+	}
+
+	range = __get_physmem_range_ptr(physmem_info.range_count);
+	range->start = start;
+	range->end = end;
+	physmem_info.range_count++;
+}
+
+static int __diag260(unsigned long rx1, unsigned long rx2)
+{
+	unsigned long reg1, reg2, ry;
+	union register_pair rx;
+	psw_t old;
+	int rc;
+
+	rx.even = rx1;
+	rx.odd = rx2;
+	ry = 0x10; /* storage configuration */
+	rc = -1; /* fail */
+	asm volatile(
+		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
+		"	epsw	%[reg1],%[reg2]\n"
+		"	st	%[reg1],0(%[psw_pgm])\n"
+		"	st	%[reg2],4(%[psw_pgm])\n"
+		"	larl	%[reg1],1f\n"
+		"	stg	%[reg1],8(%[psw_pgm])\n"
+		"	diag	%[rx],%[ry],0x260\n"
+		"	ipm	%[rc]\n"
+		"	srl	%[rc],28\n"
+		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
+		: [reg1] "=&d" (reg1),
+		  [reg2] "=&a" (reg2),
+		  [rc] "+&d" (rc),
+		  [ry] "+&d" (ry),
+		  "+Q" (S390_lowcore.program_new_psw),
+		  "=Q" (old)
+		: [rx] "d" (rx.pair),
+		  [psw_old] "a" (&old),
+		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
+		: "cc", "memory");
+	return rc == 0 ? ry : -1;
+}
+
+static int diag260(void)
+{
+	int rc, i;
+
+	struct {
+		unsigned long start;
+		unsigned long end;
+	} storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
+
+	memset(storage_extents, 0, sizeof(storage_extents));
+	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
+	if (rc == -1)
+		return -1;
+
+	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
+		add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
+	return 0;
+}
+
+static int tprot(unsigned long addr)
+{
+	unsigned long reg1, reg2;
+	int rc = -EFAULT;
+	psw_t old;
+
+	asm volatile(
+		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
+		"	epsw	%[reg1],%[reg2]\n"
+		"	st	%[reg1],0(%[psw_pgm])\n"
+		"	st	%[reg2],4(%[psw_pgm])\n"
+		"	larl	%[reg1],1f\n"
+		"	stg	%[reg1],8(%[psw_pgm])\n"
+		"	tprot	0(%[addr]),0\n"
+		"	ipm	%[rc]\n"
+		"	srl	%[rc],28\n"
+		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
+		: [reg1] "=&d" (reg1),
+		  [reg2] "=&a" (reg2),
+		  [rc] "+&d" (rc),
+		  "=Q" (S390_lowcore.program_new_psw.addr),
+		  "=Q" (old)
+		: [psw_old] "a" (&old),
+		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+		  [addr] "a" (addr)
+		: "cc", "memory");
+	return rc;
+}
+
+static unsigned long search_mem_end(void)
+{
+	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
+	unsigned long offset = 0;
+	unsigned long pivot;
+
+	while (range > 1) {
+		range >>= 1;
+		pivot = offset + range;
+		if (!tprot(pivot << 20))
+			offset = pivot;
+	}
+	return (offset + 1) << 20;
+}
+
+unsigned long detect_max_physmem_end(void)
+{
+	unsigned long max_physmem_end = 0;
+
+	if (!sclp_early_get_memsize(&max_physmem_end)) {
+		physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
+	} else {
+		max_physmem_end = search_mem_end();
+		physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
+	}
+	return max_physmem_end;
+}
+
+void detect_physmem_online_ranges(unsigned long max_physmem_end)
+{
+	if (!sclp_early_read_storage_info()) {
+		physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
+	} else if (!diag260()) {
+		physmem_info.info_source = MEM_DETECT_DIAG260;
+	} else if (max_physmem_end) {
+		add_physmem_online_range(0, max_physmem_end);
+	}
+}
+
+void physmem_set_usable_limit(unsigned long limit)
+{
+	physmem_info.usable = limit;
+	physmem_alloc_pos = limit;
+}
+
+static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
+{
+	unsigned long start, end, total_mem = 0, total_reserved_mem = 0;
+	struct reserved_range *range;
+	enum reserved_range_type t;
+	int i;
+
+	decompressor_printk("Linux version %s\n", kernel_version);
+	if (!is_prot_virt_guest() && early_command_line[0])
+		decompressor_printk("Kernel command line: %s\n", early_command_line);
+	decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
+			    size, align, min, max);
+	decompressor_printk("Reserved memory ranges:\n");
+	for_each_physmem_reserved_range(t, range, &start, &end) {
+		decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
+		total_reserved_mem += end - start;
+	}
+	decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
+			    get_physmem_info_source(), physmem_info.info_source);
+	for_each_physmem_usable_range(i, &start, &end) {
+		decompressor_printk("%016lx %016lx\n", start, end);
+		total_mem += end - start;
+	}
+	decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
+			    total_mem, total_reserved_mem,
+			    total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
+	print_stacktrace(current_frame_address());
+	sclp_early_printk("\n\n -- System halted\n");
+	disabled_wait();
+}
+
+void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
+{
+	physmem_info.reserved[type].start = addr;
+	physmem_info.reserved[type].end = addr + size;
+}
+
+void physmem_free(enum reserved_range_type type)
+{
+	physmem_info.reserved[type].start = 0;
+	physmem_info.reserved[type].end = 0;
+}
+
+static bool __physmem_alloc_intersects(unsigned long addr, unsigned long size,
+				       unsigned long *intersection_start)
+{
+	unsigned long res_addr, res_size;
+	int t;
+
+	for (t = 0; t < RR_MAX; t++) {
+		if (!get_physmem_reserved(t, &res_addr, &res_size))
+			continue;
+		if (intersects(addr, size, res_addr, res_size)) {
+			*intersection_start = res_addr;
+			return true;
+		}
+	}
+	return ipl_report_certs_intersects(addr, size, intersection_start);
+}
+
+static unsigned long __physmem_alloc_range(unsigned long size, unsigned long align,
+					   unsigned long min, unsigned long max,
+					   unsigned int from_ranges, unsigned int *ranges_left,
+					   bool die_on_oom)
+{
+	unsigned int nranges = from_ranges ?: physmem_info.range_count;
+	unsigned long range_start, range_end;
+	unsigned long intersection_start;
+	unsigned long addr, pos = max;
+
+	align = max(align, 8UL);
+	while (nranges) {
+		__get_physmem_range(nranges - 1, &range_start, &range_end, false);
+		pos = min(range_end, pos);
+
+		if (round_up(min, align) + size > pos)
+			break;
+		addr = round_down(pos - size, align);
+		if (range_start > addr) {
+			nranges--;
+			continue;
+		}
+		if (__physmem_alloc_intersects(addr, size, &intersection_start)) {
+			pos = intersection_start;
+			continue;
+		}
+
+		if (ranges_left)
+			*ranges_left = nranges;
+		return addr;
+	}
+	if (die_on_oom)
+		die_oom(size, align, min, max);
+	return 0;
+}
+
+unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
+				  unsigned long align, unsigned long min, unsigned long max,
+				  bool die_on_oom)
+{
+	unsigned long addr;
+
+	max = min(max, physmem_alloc_pos);
+	addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
+	if (addr)
+		physmem_reserve(type, addr, size);
+	return addr;
+}
+
+unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
+				     unsigned long align)
+{
+	struct reserved_range *range = &physmem_info.reserved[type];
+	struct reserved_range *new_range;
+	unsigned int ranges_left;
+	unsigned long addr;
+
+	addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
+				     &ranges_left, true);
+	/* if not a consecutive allocation of the same type or first allocation */
+	if (range->start != addr + size) {
+		if (range->end) {
+			physmem_alloc_pos = __physmem_alloc_range(
+				sizeof(struct reserved_range), 0, 0, physmem_alloc_pos,
+				physmem_alloc_ranges, &ranges_left, true);
+			new_range = (struct reserved_range *)physmem_alloc_pos;
+			*new_range = *range;
+			range->chain = new_range;
+			addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
+						     ranges_left, &ranges_left, true);
+		}
+		range->end = addr + size;
+	}
+	range->start = addr;
+	physmem_alloc_pos = addr;
+	physmem_alloc_ranges = ranges_left;
+	return addr;
+}
+
+unsigned long get_physmem_alloc_pos(void)
+{
+	return physmem_alloc_pos;
+}
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
@@ -12,7 +12,7 @@
 #include <asm/diag.h>
 #include <asm/uv.h>
 #include <asm/abs_lowcore.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include "decompressor.h"
 #include "boot.h"
 #include "uv.h"
@@ -21,7 +21,6 @@ unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned long __bootdata_preserved(__abs_lowcore);
 unsigned long __bootdata_preserved(__memcpy_real_area);
 pte_t *__bootdata_preserved(memcpy_real_ptep);
-unsigned long __bootdata(__amode31_base);
 unsigned long __bootdata_preserved(VMALLOC_START);
 unsigned long __bootdata_preserved(VMALLOC_END);
 struct page *__bootdata_preserved(vmemmap);
@@ -29,8 +28,6 @@ unsigned long __bootdata_preserved(vmemmap_size);
 unsigned long __bootdata_preserved(MODULES_VADDR);
 unsigned long __bootdata_preserved(MODULES_END);
 unsigned long __bootdata(ident_map_size);
-int __bootdata(is_full_image) = 1;
-struct initrd_data __bootdata(initrd_data);
 
 u64 __bootdata_preserved(stfle_fac_list[16]);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
@@ -76,17 +73,20 @@ unsigned long mem_safe_offset(void)
 }
 #endif
 
-static unsigned long rescue_initrd(unsigned long safe_addr)
+static void rescue_initrd(unsigned long min, unsigned long max)
 {
+	unsigned long old_addr, addr, size;
+
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
-		return safe_addr;
-	if (!initrd_data.start || !initrd_data.size)
-		return safe_addr;
-	if (initrd_data.start < safe_addr) {
-		memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
-		initrd_data.start = safe_addr;
-	}
-	return initrd_data.start + initrd_data.size;
+		return;
+	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
+		return;
+	if (addr >= min && addr + size <= max)
+		return;
+	old_addr = addr;
+	physmem_free(RR_INITRD);
+	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
+	memmove((void *)addr, (void *)old_addr, size);
 }
 
 static void copy_bootdata(void)
@@ -140,7 +140,7 @@ static void handle_relocs(unsigned long offset)
  *
  * Consider the following factors:
  * 1. max_physmem_end - end of physical memory online or standby.
- *    Always <= end of the last online memory block (get_mem_detect_end()).
+ *    Always >= end of the last online memory range (get_physmem_online_end()).
  * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
  *    kernel is able to support.
  * 3. "mem=" kernel command line option which limits physical memory usage.
@@ -160,10 +160,10 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
 
 #ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
-		kaslr_enabled = 0;
+		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
-		kaslr_enabled = 0;
+		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
@@ -235,9 +235,9 @@ static unsigned long setup_kernel_memory_layout(void)
 /*
  * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
  */
-static void clear_bss_section(void)
+static void clear_bss_section(unsigned long vmlinux_lma)
 {
-	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
+	memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
 }
 
 /*
@@ -256,7 +256,6 @@ static void setup_vmalloc_size(void)
 
 static void offset_vmlinux_info(unsigned long offset)
 {
-	vmlinux.default_lma += offset;
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
@@ -266,60 +265,83 @@ static void offset_vmlinux_info(unsigned long offset)
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
-}
-
-static unsigned long reserve_amode31(unsigned long safe_addr)
-{
-	__amode31_base = PAGE_ALIGN(safe_addr);
-	return __amode31_base + vmlinux.amode31_size;
+#ifdef CONFIG_KASAN
+	vmlinux.kasan_early_shadow_page_off += offset;
+	vmlinux.kasan_early_shadow_pte_off += offset;
+	vmlinux.kasan_early_shadow_pmd_off += offset;
+	vmlinux.kasan_early_shadow_pud_off += offset;
+	vmlinux.kasan_early_shadow_p4d_off += offset;
+#endif
 }
 
 void startup_kernel(void)
 {
	unsigned long max_physmem_end;
-	unsigned long random_lma;
-	unsigned long safe_addr;
+	unsigned long vmlinux_lma = 0;
+	unsigned long amode31_lma = 0;
	unsigned long asce_limit;
+	unsigned long safe_addr;
	void *img;
	psw_t psw;
 
-	initrd_data.start = parmarea.initrd_start;
-	initrd_data.size = parmarea.initrd_size;
+	setup_lpp();
+	safe_addr = mem_safe_offset();
+	/*
+	 * reserve decompressor memory together with decompression heap, buffer and
+	 * memory which might be occupied by uncompressed kernel at default 1Mb
+	 * position (if KASLR is off or failed).
+	 */
+	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
+		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;
 
-	setup_lpp();
	store_ipl_parmblock();
-	safe_addr = mem_safe_offset();
-	safe_addr = reserve_amode31(safe_addr);
-	safe_addr = read_ipl_report(safe_addr);
+	read_ipl_report();
	uv_query_info();
-	safe_addr = rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	sanitize_prot_virt_host();
-	max_physmem_end = detect_memory(&safe_addr);
+	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout();
-	mem_detect_set_usable_limit(ident_map_size);
+	/* got final ident_map_size, physmem allocations could be performed now */
+	physmem_set_usable_limit(ident_map_size);
+	detect_physmem_online_ranges(max_physmem_end);
+	save_ipl_cert_comp_list();
+	rescue_initrd(safe_addr, ident_map_size);
 
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
-		random_lma = get_random_base(safe_addr);
-		if (random_lma) {
-			__kaslr_offset = random_lma - vmlinux.default_lma;
-			img = (void *)vmlinux.default_lma;
+	if (kaslr_enabled()) {
+		vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
+						     THREAD_SIZE, vmlinux.default_lma,
+						     ident_map_size);
+		if (vmlinux_lma) {
+			__kaslr_offset = vmlinux_lma - vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}
+	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
+	physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);
 
	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
-		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
-	} else if (__kaslr_offset)
-		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+	} else if (__kaslr_offset) {
+		img = (void *)vmlinux.default_lma;
+		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
+		memset(img, 0, vmlinux.image_size);
+	}
+
+	/* vmlinux decompression is done, shrink reserved low memory */
+	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
+	if (kaslr_enabled())
+		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
+	amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
+	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
 
	/*
	 * The order of the following operations is important:
@@ -334,21 +356,16 @@ void startup_kernel(void)
	 * - copy_bootdata() must follow setup_vmem() to propagate changes to
	 *   bootdata made by setup_vmem()
	 */
-	clear_bss_section();
+	clear_bss_section(vmlinux_lma);
	handle_relocs(__kaslr_offset);
	setup_vmem(asce_limit);
	copy_bootdata();
 
-	if (__kaslr_offset) {
-		/*
-		 * Save KASLR offset for early dumps, before vmcore_info is set.
-		 * Mark as uneven to distinguish from real vmcore_info pointer.
-		 */
-		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
-		/* Clear non-relocated kernel */
-		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
-			memset(img, 0, vmlinux.image_size);
-	}
+	/*
+	 * Save KASLR offset for early dumps, before vmcore_info is set.
+	 * Mark as uneven to distinguish from real vmcore_info pointer.
+	 */
+	S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;
 
	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
@@ -1,19 +1,202 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

unsigned long __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

#define init_mm		(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir	vmlinux.swapper_pg_dir_off
#define invalid_pg_dir	vmlinux.invalid_pg_dir_off

enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)x))

static pte_t pte_z;

static void kasan_populate_shadow(void)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	/*
	 * Current memory layout:
	 * +- 0 -------------+         +- shadow start -+
	 * |1:1 ident mapping|        /|1/8 of ident map|
	 * |                 |       / |                |
	 * +-end of ident map+      /  +----------------+
	 * | ... gap ...     |     /   | kasan          |
	 * |                 |    /    | zero page      |
	 * +- vmalloc area  -+   /     | mapping        |
	 * | vmalloc_size    |  /      | (untracked)    |
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb             |/        | unmapped       | allocated per module
	 * +- shadow start  -+         +----------------+
	 * | 1/8 addr space  |         | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+         +- shadow start -+
	 * |1:1 ident mapping|        /|1/8 of ident map|
	 * |                 |       / |                |
	 * +-end of ident map+      /  +----------------+
	 * | ... gap ...     |     /   | kasan zero page| (untracked)
	 * |                 |    /    | mapping        |
	 * +- vmalloc area  -+   /     +----------------+
	 * | vmalloc_size    |  /      |shallow populate|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb             |/        |shallow populate|
	 * +- shadow start  -+         +----------------+
	 * | 1/8 addr space  |         | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 */

	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
	pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
}
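
As background for the layout above: generic KASAN maps each 8 bytes of address space onto one shadow byte, which is all that __sha() does via kasan_mem_to_shadow(). A minimal sketch of that translation, assuming the generic-KASAN constants (KASAN_SHADOW_SCALE_SHIFT is 3):

	/* Sketch of the generic KASAN shadow translation behind __sha(). */
	static inline unsigned long sha_model(unsigned long addr)
	{
		/* one shadow byte covers 1 << KASAN_SHADOW_SCALE_SHIFT (= 8) bytes */
		return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
	}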

static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	pte_t entry;

	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}
#else

static inline void kasan_populate_shadow(void) {}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif

/*
 * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
 */

@@ -22,60 +205,13 @@ static inline pte_t *__virt_to_kpte(unsigned long va)
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}

unsigned long __bootdata_preserved(s390_invalid_asce);
unsigned long __bootdata(pgalloc_pos);
unsigned long __bootdata(pgalloc_end);
unsigned long __bootdata(pgalloc_low);

enum populate_mode {
	POPULATE_NONE,
	POPULATE_ONE2ONE,
	POPULATE_ABS_LOWCORE,
};

static void boot_check_oom(void)
{
	if (pgalloc_pos < pgalloc_low)
		error("out of memory on boot\n");
}

static void pgtable_populate_init(void)
{
	unsigned long initrd_end;
	unsigned long kernel_end;

	kernel_end = vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
	pgalloc_low = round_up(kernel_end, PAGE_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end = round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	pgalloc_end = round_down(get_mem_detect_end(), PAGE_SIZE);
	pgalloc_pos = pgalloc_end;

	boot_check_oom();
}

static void *boot_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	pgalloc_pos -= size;
	pgalloc_pos = round_down(pgalloc_pos, size);

	boot_check_oom();

	return (void *)pgalloc_pos;
}

static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = boot_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	return table;
}

@@ -84,28 +220,37 @@ static pte_t *boot_pte_alloc(void)
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	/*
	 * handling pte_leftovers this way helps to avoid memory fragmentation
	 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off
	 */
	if (!pte_leftover) {
		pte_leftover = boot_alloc_pages(0);
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static unsigned long _pa(unsigned long addr, enum populate_mode mode)
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_ONE2ONE:
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}

@@ -126,23 +271,28 @@ static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			entry = __pte(_pa(addr, mode));
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL_EXEC);
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}

static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

@@ -150,10 +300,13 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next)) {
				entry = __pmd(_pa(addr, mode));
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();

@@ -163,12 +316,14 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}

static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

@@ -176,10 +331,13 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next)) {
				entry = __pud(_pa(addr, mode));
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);

@@ -189,6 +347,8 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}

static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,

@@ -202,6 +362,8 @@ static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long e
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}

@@ -219,9 +381,15 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}

@@ -250,16 +418,17 @@ void setup_vmem(unsigned long asce_limit)
	 * To prevent creation of a large page at address 0 first map
	 * the lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate_init();
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
	for_each_mem_detect_usable_block(i, &start, &end)
		pgtable_populate(start, end, POPULATE_ONE2ONE);
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(start, end, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);

	kasan_populate_shadow();

	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

@@ -269,10 +438,3 @@ void setup_vmem(unsigned long asce_limit)

	init_mm.context.asce = S390_lowcore.kernel_asce;
}

unsigned long vmem_estimate_memory_needs(unsigned long online_mem_total)
{
	unsigned long pages = DIV_ROUND_UP(online_mem_total, PAGE_SIZE);

	return DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
}
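
To make the estimate above concrete (a worked example, assuming the usual s390 constants PAGE_SIZE = 4 KiB, _PAGE_ENTRIES = 256 and _PAGE_TABLE_SIZE = 2 KiB): 4 GiB of online memory is 1,048,576 pages, which needs 4,096 page tables of 2 KiB each; doubled per the formula, that comes to 16 MiB — roughly 1/256 of the online memory set aside for boot page tables.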

@@ -93,6 +93,8 @@ SECTIONS
		_decompressor_syms_end = .;
	}

	_decompressor_end = .;

#ifdef CONFIG_KERNEL_UNCOMPRESSED
	. = 0x100000;
#else

@@ -13,27 +13,28 @@
#define SP	%r15
#define FRAME	(16 * 8 + 4 * 8)

	.data
	.align	32
	.data
	.balign	32

.Lsigma:
	.long	0x61707865,0x3320646e,0x79622d32,0x6b206574	# endian-neutral
	.long	1,0,0,0
	.long	2,0,0,0
	.long	3,0,0,0
	.long	0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c	# byte swap
SYM_DATA_START_LOCAL(sigma)
	.long	0x61707865,0x3320646e,0x79622d32,0x6b206574	# endian-neutral
	.long	1,0,0,0
	.long	2,0,0,0
	.long	3,0,0,0
	.long	0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c	# byte swap

	.long	0,1,2,3
	.long	0x61707865,0x61707865,0x61707865,0x61707865	# smashed sigma
	.long	0x3320646e,0x3320646e,0x3320646e,0x3320646e
	.long	0x79622d32,0x79622d32,0x79622d32,0x79622d32
	.long	0x6b206574,0x6b206574,0x6b206574,0x6b206574
	.long	0,1,2,3
	.long	0x61707865,0x61707865,0x61707865,0x61707865	# smashed sigma
	.long	0x3320646e,0x3320646e,0x3320646e,0x3320646e
	.long	0x79622d32,0x79622d32,0x79622d32,0x79622d32
	.long	0x6b206574,0x6b206574,0x6b206574,0x6b206574
SYM_DATA_END(sigma)

	.previous
	.previous

	GEN_BR_THUNK %r14

	.text
	.text

#############################################################################
# void chacha20_vx_4x(u8 *out, const u8 *inp, size_t len,

@@ -78,10 +79,10 @@
#define XT2	%v29
#define XT3	%v30

ENTRY(chacha20_vx_4x)
SYM_FUNC_START(chacha20_vx_4x)
	stmg	%r6,%r7,6*8(SP)

	larl	%r7,.Lsigma
	larl	%r7,sigma
	lhi	%r0,10
	lhi	%r1,0

@@ -403,7 +404,7 @@ ENTRY(chacha20_vx_4x)

	lmg	%r6,%r7,6*8(SP)
	BR_EX	%r14
ENDPROC(chacha20_vx_4x)
SYM_FUNC_END(chacha20_vx_4x)

#undef OUT
#undef INP

@@ -471,7 +472,7 @@ ENDPROC(chacha20_vx_4x)
#define T2	%v29
#define T3	%v30

ENTRY(chacha20_vx)
SYM_FUNC_START(chacha20_vx)
	clgfi	LEN,256
	jle	chacha20_vx_4x
	stmg	%r6,%r7,6*8(SP)

@@ -481,7 +482,7 @@ ENTRY(chacha20_vx)
	la	SP,0(%r1,SP)
	stg	%r0,0(SP)	# back-chain

	larl	%r7,.Lsigma
	larl	%r7,sigma
	lhi	%r0,10

	VLM	K1,K2,0,KEY,0	# load key

@@ -902,6 +903,6 @@ ENTRY(chacha20_vx)
	lmg	%r6,%r7,FRAME+6*8(SP)
	la	SP,FRAME(SP)
	BR_EX	%r14
ENDPROC(chacha20_vx)
SYM_FUNC_END(chacha20_vx)

	.previous

@@ -24,8 +24,8 @@
#define CONST_RU_POLY		%v13
#define CONST_CRC_POLY		%v14

	.data
	.align	8
	.data
	.balign	8

/*
 * The CRC-32 constant block contains reduction constants to fold and

@@ -58,19 +58,20 @@
 * P'(x) = 0xEDB88320
 */

.Lconstants_CRC_32_BE:
SYM_DATA_START_LOCAL(constants_CRC_32_BE)
	.quad	0x08833794c, 0x0e6228b11	# R1, R2
	.quad	0x0c5b9cd4c, 0x0e8a45605	# R3, R4
	.quad	0x0f200aa66, 1 << 32		# R5, x32
	.quad	0x0490d678d, 1			# R6, 1
	.quad	0x104d101df, 0			# u
	.quad	0x104C11DB7, 0			# P(x)
SYM_DATA_END(constants_CRC_32_BE)

	.previous
	.previous

	GEN_BR_THUNK %r14

	.text
	.text
/*
 * The CRC-32 function(s) use these calling conventions:
 *

@@ -90,9 +91,9 @@
 *
 * V9..V14: CRC-32 constants.
 */
ENTRY(crc32_be_vgfm_16)
SYM_FUNC_START(crc32_be_vgfm_16)
	/* Load CRC-32 constants */
	larl	%r5,.Lconstants_CRC_32_BE
	larl	%r5,constants_CRC_32_BE
	VLM	CONST_R1R2,CONST_CRC_POLY,0,%r5

	/* Load the initial CRC value into the leftmost word of V0. */

@@ -207,6 +208,6 @@ ENTRY(crc32_be_vgfm_16)
.Ldone:
	VLGVF	%r2,%v2,3
	BR_EX	%r14
ENDPROC(crc32_be_vgfm_16)
SYM_FUNC_END(crc32_be_vgfm_16)

	.previous

@@ -25,8 +25,8 @@
#define CONST_RU_POLY		%v13
#define CONST_CRC_POLY		%v14

	.data
	.align	8
	.data
	.balign	8

/*
 * The CRC-32 constant block contains reduction constants to fold and

@@ -59,27 +59,29 @@
 * P'(x) = 0x82F63B78
 */

.Lconstants_CRC_32_LE:
SYM_DATA_START_LOCAL(constants_CRC_32_LE)
	.octa	0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
	.quad	0x1c6e41596, 0x154442bd4		# R2, R1
	.quad	0x0ccaa009e, 0x1751997d0		# R4, R3
	.octa	0x163cd6124				# R5
	.octa	0x1F7011641				# u'
	.octa	0x1DB710641				# P'(x) << 1
SYM_DATA_END(constants_CRC_32_LE)

.Lconstants_CRC_32C_LE:
SYM_DATA_START_LOCAL(constants_CRC_32C_LE)
	.octa	0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
	.quad	0x09e4addf8, 0x740eef02			# R2, R1
	.quad	0x14cd00bd6, 0xf20c0dfe			# R4, R3
	.octa	0x0dd45aab8				# R5
	.octa	0x0dea713f1				# u'
	.octa	0x105ec76f0				# P'(x) << 1
SYM_DATA_END(constants_CRC_32C_LE)

	.previous
	.previous

	GEN_BR_THUNK %r14

	.text
	.text

/*
 * The CRC-32 functions use these calling conventions:

@@ -102,17 +104,17 @@
 * V10..V14: CRC-32 constants.
 */

ENTRY(crc32_le_vgfm_16)
	larl	%r5,.Lconstants_CRC_32_LE
SYM_FUNC_START(crc32_le_vgfm_16)
	larl	%r5,constants_CRC_32_LE
	j	crc32_le_vgfm_generic
ENDPROC(crc32_le_vgfm_16)
SYM_FUNC_END(crc32_le_vgfm_16)

ENTRY(crc32c_le_vgfm_16)
	larl	%r5,.Lconstants_CRC_32C_LE
SYM_FUNC_START(crc32c_le_vgfm_16)
	larl	%r5,constants_CRC_32C_LE
	j	crc32_le_vgfm_generic
ENDPROC(crc32c_le_vgfm_16)
SYM_FUNC_END(crc32c_le_vgfm_16)

ENTRY(crc32_le_vgfm_generic)
SYM_FUNC_START(crc32_le_vgfm_generic)
	/* Load CRC-32 constants */
	VLM	CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5

@@ -268,6 +270,6 @@ ENTRY(crc32_le_vgfm_generic)
.Ldone:
	VLGVF	%r2,%v2,2
	BR_EX	%r14
ENDPROC(crc32_le_vgfm_generic)
SYM_FUNC_END(crc32_le_vgfm_generic)

	.previous

@@ -43,10 +43,11 @@ struct ap_queue_status {
	unsigned int queue_empty	: 1;
	unsigned int replies_waiting	: 1;
	unsigned int queue_full		: 1;
	unsigned int _pad1		: 4;
	unsigned int			: 3;
	unsigned int async		: 1;
	unsigned int irq_enabled	: 1;
	unsigned int response_code	: 8;
	unsigned int _pad2		: 16;
	unsigned int			: 16;
};

/*

@@ -86,6 +87,42 @@ static inline bool ap_instructions_available(void)
	return reg1 != 0;
}

/* TAPQ register GR2 response struct */
struct ap_tapq_gr2 {
	union {
		unsigned long value;
		struct {
			unsigned int fac    : 32;	/* facility bits */
			unsigned int apinfo : 32;	/* ap type, ... */
		};
		struct {
			unsigned int s	   : 1;		/* APSC */
			unsigned int m	   : 1;		/* AP4KM */
			unsigned int c	   : 1;		/* AP4KC */
			unsigned int mode  : 3;
			unsigned int n	   : 1;		/* APXA */
			unsigned int	   : 1;
			unsigned int class : 8;
			unsigned int bs	   : 2;		/* SE bind/assoc */
			unsigned int	   : 14;
			unsigned int at	   : 8;		/* ap type */
			unsigned int nd	   : 8;		/* nr of domains */
			unsigned int	   : 4;
			unsigned int ml	   : 4;		/* apxl ml */
			unsigned int	   : 4;
			unsigned int qd	   : 4;		/* queue depth */
		};
	};
};

/*
 * Convenience defines to be used with the bs field from struct ap_tapq_gr2
 */
#define AP_BS_Q_USABLE			0
#define AP_BS_Q_USABLE_NO_SECURE_KEY	1
#define AP_BS_Q_AVAIL_FOR_BINDING	2
#define AP_BS_Q_UNUSABLE		3

/**
 * ap_tapq(): Test adjunct processor queue.
 * @qid: The AP queue number

@@ -93,7 +130,7 @@ static inline bool ap_instructions_available(void)
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
static inline struct ap_queue_status ap_tapq(ap_qid_t qid, struct ap_tapq_gr2 *info)
{
	union ap_queue_status_reg reg1;
	unsigned long reg2;

@@ -108,7 +145,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
		: [qid] "d" (qid)
		: "cc", "0", "1", "2");
	if (info)
		*info = reg2;
		info->value = reg2;
	return reg1.status;
}

@@ -116,13 +153,12 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @tbit: Test facilities bit
 * @info: Pointer to queue descriptor
 * @info: Ptr to tapq gr2 struct
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
						   int tbit,
						   unsigned long *info)
static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit,
						   struct ap_tapq_gr2 *info)
{
	if (tbit)
		qid |= 1UL << 23;	/* set T bit */

@@ -132,14 +168,18 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
/**
 * ap_pqap_rapq(): Reset adjunct processor queue.
 * @qid: The AP queue number
 * @fbit: if != 0 set F bit
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
{
	unsigned long reg0 = qid | (1UL << 24);	/* fc 1UL is RAPQ */
	union ap_queue_status_reg reg1;

	if (fbit)
		reg0 |= 1UL << 22;

	asm volatile(
		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(RAPQ) */

@@ -153,14 +193,18 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
/**
 * ap_pqap_zapq(): Reset and zeroize adjunct processor queue.
 * @qid: The AP queue number
 * @fbit: if != 0 set F bit
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
{
	unsigned long reg0 = qid | (2UL << 24);	/* fc 2UL is ZAPQ */
	union ap_queue_status_reg reg1;

	if (fbit)
		reg0 |= 1UL << 22;

	asm volatile(
		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(ZAPQ) */

@@ -180,15 +224,16 @@ struct ap_config_info {
	unsigned int apxa : 1;		/* N bit */
	unsigned int qact : 1;		/* C bit */
	unsigned int rc8a : 1;		/* R bit */
	unsigned char _reserved1 : 4;
	unsigned char _reserved2[3];
	unsigned char Na;		/* max # of APs - 1 */
	unsigned char Nd;		/* max # of Domains - 1 */
	unsigned char _reserved3[10];
	unsigned int : 4;
	unsigned int apsb : 1;		/* B bit */
	unsigned int : 23;
	unsigned char na;		/* max # of APs - 1 */
	unsigned char nd;		/* max # of Domains - 1 */
	unsigned char _reserved0[10];
	unsigned int apm[8];		/* AP ID mask */
	unsigned int aqm[8];		/* AP (usage) queue mask */
	unsigned int adm[8];		/* AP (control) domain mask */
	unsigned char _reserved4[16];
	unsigned char _reserved1[16];
} __aligned(8);

/**

@@ -318,6 +363,59 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
	return reg1.status;
}

/*
 * ap_bapq(): SE bind AP queue.
 * @qid: The AP queue number
 *
 * Returns AP queue status structure.
 *
 * Invoking this function in a non-SE environment
 * may cause a specification exception.
 */
static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
{
	unsigned long reg0 = qid | (7UL << 24);	/* fc 7 is BAPQ */
	union ap_queue_status_reg reg1;

	asm volatile(
		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(BAPQ) */
		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
		: [reg1] "=&d" (reg1.value)
		: [reg0] "d" (reg0)
		: "cc", "0", "1");

	return reg1.status;
}

/*
 * ap_aapq(): SE associate AP queue.
 * @qid: The AP queue number
 * @sec_idx: The secret index
 *
 * Returns AP queue status structure.
 *
 * Invoking this function in a non-SE environment
 * may cause a specification exception.
 */
static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
{
	unsigned long reg0 = qid | (8UL << 24);	/* fc 8 is AAPQ */
	unsigned long reg2 = sec_idx;
	union ap_queue_status_reg reg1;

	asm volatile(
		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
		"	lgr	2,%[reg2]\n"		/* secret index into gr2 */
		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(AAPQ) */
		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
		: [reg1] "=&d" (reg1.value)
		: [reg0] "d" (reg0), [reg2] "d" (reg2)
		: "cc", "0", "1", "2");

	return reg1.status;
}

/**
 * ap_nqap(): Send message to adjunct processor queue.
 * @qid: The AP queue number

@@ -359,10 +457,11 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
 * ap_dqap(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @reslength: Resitual length on return
 * @resgr0: input: gr0 value (only used if != 0), output: resitual gr0 content
 * @msg: Pointer to message buffer
 * @msglen: Message buffer size
 * @length: Pointer to length of actually written bytes
 * @reslength: Residual length on return
 * @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content
 *
 * Returns AP queue status structure.
 * Condition code 1 on DQAP means the receive has taken place

@@ -386,8 +485,9 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
 * *resgr0 is to be used instead of qid to further process this entry.
 */
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
					     unsigned long long *psmid,
					     void *msg, size_t length,
					     unsigned long *psmid,
					     void *msg, size_t msglen,
					     size_t *length,
					     size_t *reslength,
					     unsigned long *resgr0)
{

@@ -399,7 +499,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
	rp1.even = 0UL;
	rp1.odd  = 0UL;
	rp2.even = (unsigned long)msg;
	rp2.odd  = (unsigned long)length;
	rp2.odd  = (unsigned long)msglen;

	asm volatile(
		"	lgr	0,%[reg0]\n"	/* qid param into gr0 */

@@ -429,11 +529,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
		if (resgr0)
			*resgr0 = reg0;
	} else {
		*psmid = (((unsigned long long)rp1.even) << 32) + rp1.odd;
		*psmid = (rp1.even << 32) + rp1.odd;
		if (resgr0)
			*resgr0 = 0;
	}

	/* update *length with the nr of bytes stored into the msg buffer */
	if (length)
		*length = msglen - rp2.odd;

	return reg1.status;
}
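
For orientation, a sketch of how a caller might drive the reworked ap_dqap() signature; the helper, the buffer handling and the bare response-code check are illustrative, not taken from this diff:

	/* Hypothetical receive helper built on the new ap_dqap() interface. */
	static int example_dqap(ap_qid_t qid, void *buf, size_t buflen)
	{
		struct ap_queue_status status;
		unsigned long psmid, resgr0 = 0;
		size_t len = 0, reslen = 0;

		status = ap_dqap(qid, &psmid, buf, buflen, &len, &reslen, &resgr0);
		if (status.response_code)	/* 0 == AP_RESPONSE_NORMAL */
			return -EIO;
		/* len now holds the number of bytes actually stored into buf */
		return 0;
	}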

@@ -12,13 +12,7 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H

#ifdef CONFIG_GENERIC_CSUM

#include <asm-generic/checksum.h>

#else /* CONFIG_GENERIC_CSUM */

#include <linux/uaccess.h>
#include <linux/kasan-checks.h>
#include <linux/in6.h>

/*

@@ -40,6 +34,7 @@ static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
		.odd = (unsigned long) len,
	};

	kasan_check_read(buff, len);
	asm volatile(
		"0:	cksm	%[sum],%[rp]\n"
		"	jo	0b\n"

@@ -135,5 +130,4 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
	return csum_fold((__force __wsum)(sum >> 32));
}

#endif /* CONFIG_GENERIC_CSUM */
#endif /* _S390_CHECKSUM_H */

@@ -90,7 +90,7 @@ struct diag8c {
	u8	num_partitions;
	u16	width;
	u16	height;
	u8	data[0];
	u8	data[];
} __packed __aligned(4);

extern int diag8c(struct diag8c *out, struct ccw_dev_id *devno);

@@ -60,9 +60,4 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,

#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

static inline bool on_thread_stack(void)
{
	return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}

#endif

@@ -286,7 +286,7 @@ struct tccb_tcat {
 */
struct tccb {
	struct tccb_tcah tcah;
	u8 tca[0];
	u8 tca[];
} __attribute__ ((packed, aligned(8)));

struct tcw *tcw_get_intrg(struct tcw *tcw);

@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#include <asm/pgtable.h>
#include <linux/const.h>

#ifdef CONFIG_KASAN

@@ -13,35 +13,6 @@
#define KASAN_SHADOW_START	KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)

extern void kasan_early_init(void);

/*
 * Estimate kasan memory requirements, which it will reserve
 * at the very end of available physical memory. To estimate
 * that, we take into account that kasan would require
 * 1/8 of available physical memory (for shadow memory) +
 * creating page tables for the shadow memory region.
 * To keep page tables estimates simple take the double of
 * combined ptes size.
 *
 * physmem parameter has to be already adjusted if not entire physical memory
 * would be used (e.g. due to effect of "mem=" option).
 */
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
{
	unsigned long kasan_needs;
	unsigned long pages;
	/* for shadow memory */
	kasan_needs = round_up(physmem / 8, PAGE_SIZE);
	/* for paging structures */
	pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
	kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;

	return kasan_needs;
}
#else
static inline void kasan_early_init(void) { }
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif

#endif

@@ -4,7 +4,7 @@

#include <linux/stringify.h>

#define __ALIGN .align 16, 0x07
#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT, 0x07
#define __ALIGN_STR __stringify(__ALIGN)

#endif

@@ -1,117 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H

#include <linux/types.h>

enum mem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_SCLP_STOR_INFO,
	MEM_DETECT_DIAG260,
	MEM_DETECT_SCLP_READ_INFO,
	MEM_DETECT_BIN_SEARCH
};

struct mem_detect_block {
	u64 start;
	u64 end;
};

/*
 * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practise only storage element id 0 and 1 are used).
 * According to architecture one storage element could have as much as
 * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
 * If more mem_detect_blocks are required, a block of memory from already
 * known mem_detect_block is taken (entries_extended points to it).
 */
#define MEM_INLINED_ENTRIES	255 /* (PAGE_SIZE - 16) / 16 */

struct mem_detect_info {
	u32 count;
	u8 info_source;
	unsigned long usable;
	struct mem_detect_block entries[MEM_INLINED_ENTRIES];
	struct mem_detect_block *entries_extended;
};
extern struct mem_detect_info mem_detect;

void add_mem_detect_block(u64 start, u64 end);

static inline int __get_mem_detect_block(u32 n, unsigned long *start,
					 unsigned long *end, bool respect_usable_limit)
{
	if (n >= mem_detect.count) {
		*start = 0;
		*end = 0;
		return -1;
	}

	if (n < MEM_INLINED_ENTRIES) {
		*start = (unsigned long)mem_detect.entries[n].start;
		*end = (unsigned long)mem_detect.entries[n].end;
	} else {
		*start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
		*end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
	}

	if (respect_usable_limit && mem_detect.usable) {
		if (*start >= mem_detect.usable)
			return -1;
		if (*end > mem_detect.usable)
			*end = mem_detect.usable;
	}
	return 0;
}

/**
 * for_each_mem_detect_usable_block - early online memory range iterator
 * @i: an integer used as loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges below usable limit.
 */
#define for_each_mem_detect_usable_block(i, p_start, p_end) \
	for (i = 0; !__get_mem_detect_block(i, p_start, p_end, true); i++)

/* Walks over all detected online memory ranges disregarding usable limit. */
#define for_each_mem_detect_block(i, p_start, p_end) \
	for (i = 0; !__get_mem_detect_block(i, p_start, p_end, false); i++)

static inline unsigned long get_mem_detect_usable_total(void)
{
	unsigned long start, end, total = 0;
	int i;

	for_each_mem_detect_usable_block(i, &start, &end)
		total += end - start;

	return total;
}

static inline void get_mem_detect_reserved(unsigned long *start,
					   unsigned long *size)
{
	*start = (unsigned long)mem_detect.entries_extended;
	if (mem_detect.count > MEM_INLINED_ENTRIES)
		*size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
	else
		*size = 0;
}

static inline unsigned long get_mem_detect_end(void)
{
	unsigned long start;
	unsigned long end;

	if (mem_detect.usable)
		return mem_detect.usable;
	if (mem_detect.count) {
		__get_mem_detect_block(mem_detect.count - 1, &start, &end, false);
		return end;
	}
	return 0;
}

#endif
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_NOSPEC_ASM_H
#define _ASM_S390_NOSPEC_ASM_H

#include <linux/linkage.h>
#include <asm/dwarf.h>

#ifdef __ASSEMBLY__

@@ -16,7 +17,7 @@
	.macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN
	.pushsection .text,"ax",@progbits
	.align 16,0x07
	__ALIGN
#else
	.pushsection .text.\name,"axG",@progbits,\name,comdat
#endif

@@ -60,7 +60,6 @@ struct perf_sf_sde_regs {
#define PERF_CPUM_SF_DIAG_MODE		0x0002	  /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK		(PERF_CPUM_SF_BASIC_MODE| \
					 PERF_CPUM_SF_DIAG_MODE)
#define PERF_CPUM_SF_FULL_BLOCKS	0x0004	  /* Process full SDBs only */
#define PERF_CPUM_SF_FREQ_MODE		0x0008	  /* Sampling with frequency */

#define REG_NONE	0

@@ -71,7 +70,6 @@ struct perf_sf_sde_regs {
#define SAMPL_RATE(hwc)		((hwc)->event_base)
#define SAMPL_FLAGS(hwc)	((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SDB_FULL_BLOCKS(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
#define SAMPLE_FREQ_MODE(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)

#define perf_arch_fetch_caller_regs(regs, __ip) do {	\

@@ -34,7 +34,7 @@ enum {
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);

static inline void update_page_count(int level, long count)
{

@@ -0,0 +1,171 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H

#include <linux/types.h>

enum physmem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_SCLP_STOR_INFO,
	MEM_DETECT_DIAG260,
	MEM_DETECT_SCLP_READ_INFO,
	MEM_DETECT_BIN_SEARCH
};

struct physmem_range {
	u64 start;
	u64 end;
};

enum reserved_range_type {
	RR_DECOMPRESSOR,
	RR_INITRD,
	RR_VMLINUX,
	RR_AMODE31,
	RR_IPLREPORT,
	RR_CERT_COMP_LIST,
	RR_MEM_DETECT_EXTENDED,
	RR_VMEM,
	RR_MAX
};

struct reserved_range {
	unsigned long start;
	unsigned long end;
	struct reserved_range *chain;
};

/*
 * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice only storage element ids 0 and 1 are used.
 * According to architecture one storage element could have as many as
 * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
 * If more physmem_ranges are required, a block of memory from already
 * known physmem_range is taken (online_extended points to it).
 */
#define MEM_INLINED_ENTRIES	255 /* (PAGE_SIZE - 16) / 16 */

struct physmem_info {
	u32 range_count;
	u8 info_source;
	unsigned long usable;
	struct reserved_range reserved[RR_MAX];
	struct physmem_range online[MEM_INLINED_ENTRIES];
	struct physmem_range *online_extended;
};

extern struct physmem_info physmem_info;

void add_physmem_online_range(u64 start, u64 end);

static inline int __get_physmem_range(u32 n, unsigned long *start,
				      unsigned long *end, bool respect_usable_limit)
{
	if (n >= physmem_info.range_count) {
		*start = 0;
		*end = 0;
		return -1;
	}

	if (n < MEM_INLINED_ENTRIES) {
		*start = (unsigned long)physmem_info.online[n].start;
		*end = (unsigned long)physmem_info.online[n].end;
	} else {
		*start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
		*end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
	}

	if (respect_usable_limit && physmem_info.usable) {
		if (*start >= physmem_info.usable)
			return -1;
		if (*end > physmem_info.usable)
			*end = physmem_info.usable;
	}
	return 0;
}

/**
 * for_each_physmem_usable_range - early online memory range iterator
 * @i: an integer used as loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges below usable limit.
 */
#define for_each_physmem_usable_range(i, p_start, p_end) \
	for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)

/* Walks over all detected online memory ranges disregarding usable limit. */
#define for_each_physmem_online_range(i, p_start, p_end) \
	for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)

static inline const char *get_physmem_info_source(void)
{
	switch (physmem_info.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

#define RR_TYPE_NAME(t) case RR_ ## t: return #t
static inline const char *get_rr_type_name(enum reserved_range_type t)
{
	switch (t) {
	RR_TYPE_NAME(DECOMPRESSOR);
	RR_TYPE_NAME(INITRD);
	RR_TYPE_NAME(VMLINUX);
	RR_TYPE_NAME(AMODE31);
	RR_TYPE_NAME(IPLREPORT);
	RR_TYPE_NAME(CERT_COMP_LIST);
	RR_TYPE_NAME(MEM_DETECT_EXTENDED);
	RR_TYPE_NAME(VMEM);
	default:
		return "UNKNOWN";
	}
}

#define for_each_physmem_reserved_type_range(t, range, p_start, p_end)				\
	for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end;	\
	     range && range->end; range = range->chain,						\
	     *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)

static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
							     struct reserved_range *range)
{
	if (!range) {
		range = &physmem_info.reserved[*t];
		if (range->end)
			return range;
	}
	if (range->chain)
		return range->chain;
	while (++*t < RR_MAX) {
		range = &physmem_info.reserved[*t];
		if (range->end)
			return range;
	}
	return NULL;
}

#define for_each_physmem_reserved_range(t, range, p_start, p_end)			\
	for (t = 0, range = __physmem_reserved_next(&t, NULL),				\
	     *p_start = range ? range->start : 0, *p_end = range ? range->end : 0;	\
	     range; range = __physmem_reserved_next(&t, range),			\
	     *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)

static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
						 unsigned long *addr, unsigned long *size)
{
	*addr = physmem_info.reserved[type].start;
	*size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
	return *size;
}

#endif
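
A small usage sketch for the iterators above, the way early boot code might use them; the printing helper is hypothetical — only the macros, get_rr_type_name() and the physmem_info fields come from this header:

	/* Illustrative walk over detected online memory and reserved ranges. */
	static void dump_physmem(void)
	{
		struct reserved_range *range;
		enum reserved_range_type t;
		unsigned long start, end;
		int i;

		for_each_physmem_usable_range(i, &start, &end)
			boot_printk("online: 0x%lx-0x%lx\n", start, end);	/* boot_printk assumed */
		for_each_physmem_reserved_range(t, range, &start, &end)
			boot_printk("%s: 0x%lx-0x%lx\n", get_rr_type_name(t), start, end);
	}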

@@ -99,7 +99,6 @@ void cpu_detect_mhz_feature(void);

extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
extern void __bpon(void);
unsigned long vdso_size(void);

/*

@@ -119,6 +118,41 @@ unsigned long vdso_size(void);

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#define __stackleak_poison __stackleak_poison
static __always_inline void __stackleak_poison(unsigned long erase_low,
					       unsigned long erase_high,
					       unsigned long poison)
{
	unsigned long tmp, count;

	count = erase_high - erase_low;
	if (!count)
		return;
	asm volatile(
		"	cghi	%[count],8\n"
		"	je	2f\n"
		"	aghi	%[count],-(8+1)\n"
		"	srlg	%[tmp],%[count],8\n"
		"	ltgr	%[tmp],%[tmp]\n"
		"	jz	1f\n"
		"0:	stg	%[poison],0(%[addr])\n"
		"	mvc	8(256-8,%[addr]),0(%[addr])\n"
		"	la	%[addr],256(%[addr])\n"
		"	brctg	%[tmp],0b\n"
		"1:	stg	%[poison],0(%[addr])\n"
		"	larl	%[tmp],3f\n"
		"	ex	%[count],0(%[tmp])\n"
		"	j	4f\n"
		"2:	stg	%[poison],0(%[addr])\n"
		"	j	4f\n"
		"3:	mvc	8(1,%[addr]),0(%[addr])\n"
		"4:\n"
		: [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
		: [poison] "d" (poison)
		: "memory", "cc"
		);
}
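
For readers not fluent in s390 assembly, a rough C model of what the mvc-based loop above does: store the 8-byte poison once, then let the overlapping mvc replicate it through the rest of the block, 256 bytes per iteration. This is an illustrative sketch of the semantics, not the actual implementation:

	/* C model of the overlapping-mvc poison (illustrative only). */
	static inline void stackleak_poison_model(unsigned long low, unsigned long high,
						  unsigned long poison)
	{
		unsigned char *p = (unsigned char *)low;
		unsigned long i, count = high - low;	/* always a multiple of 8 */

		if (!count)
			return;
		memcpy(p, &poison, 8);			/* seed the first 8 bytes */
		for (i = 8; i < count; i++)		/* mvc copies byte-wise, so the */
			p[i] = p[i - 8];		/* seed propagates through the block */
	}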

/*
 * Thread structure
 */

@@ -227,6 +261,13 @@ static __always_inline unsigned long __current_stack_pointer(void)
	return sp;
}

static __always_inline bool on_thread_stack(void)
{
	unsigned long ksp = S390_lowcore.kernel_stack;

	return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}

static __always_inline unsigned short stap(void)
{
	unsigned short cpu_address;

@@ -329,9 +370,6 @@ static __always_inline void __noreturn disabled_wait(void)

#define ARCH_LOW_ADDRESS_LIMIT	0x7fffffffUL

extern int s390_isolate_bp(void);
extern int s390_isolate_bp_guest(void);

static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
	return arch_irqs_disabled_flags(regs->psw.mask);

@@ -6,11 +6,23 @@

extern struct mutex cpa_mutex;

#define SET_MEMORY_RO	1UL
#define SET_MEMORY_RW	2UL
#define SET_MEMORY_NX	4UL
#define SET_MEMORY_X	8UL
#define SET_MEMORY_4K	16UL
enum {
	_SET_MEMORY_RO_BIT,
	_SET_MEMORY_RW_BIT,
	_SET_MEMORY_NX_BIT,
	_SET_MEMORY_X_BIT,
	_SET_MEMORY_4K_BIT,
	_SET_MEMORY_INV_BIT,
	_SET_MEMORY_DEF_BIT,
};

#define SET_MEMORY_RO	BIT(_SET_MEMORY_RO_BIT)
#define SET_MEMORY_RW	BIT(_SET_MEMORY_RW_BIT)
#define SET_MEMORY_NX	BIT(_SET_MEMORY_NX_BIT)
#define SET_MEMORY_X	BIT(_SET_MEMORY_X_BIT)
#define SET_MEMORY_4K	BIT(_SET_MEMORY_4K_BIT)
#define SET_MEMORY_INV	BIT(_SET_MEMORY_INV_BIT)
#define SET_MEMORY_DEF	BIT(_SET_MEMORY_DEF_BIT)

int __set_memory(unsigned long addr, int numpages, unsigned long flags);

@@ -34,9 +46,23 @@ static inline int set_memory_x(unsigned long addr, int numpages)
	return __set_memory(addr, numpages, SET_MEMORY_X);
}

#define set_memory_rox set_memory_rox
static inline int set_memory_rox(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, SET_MEMORY_RO | SET_MEMORY_X);
}

static inline int set_memory_rwnx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, SET_MEMORY_RW | SET_MEMORY_NX);
}

static inline int set_memory_4k(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, SET_MEMORY_4K);
}

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);

#endif
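
The intended consumer of the new set_memory_rox() helper in this series is module_alloc(), which now returns non-executable memory; a hedged sketch of the pattern (size and page count are illustrative):

	/* Illustrative: allocate module text, then flip it to RO+X in one call. */
	void *text = module_alloc(PAGE_SIZE);

	if (text)
		set_memory_rox((unsigned long)text, 1);	/* one pass instead of RO then X */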

@@ -74,10 +74,6 @@ extern unsigned int zlib_dfltcc_support;

extern int noexec_disabled;
extern unsigned long ident_map_size;
extern unsigned long pgalloc_pos;
extern unsigned long pgalloc_end;
extern unsigned long pgalloc_low;
extern unsigned long __amode31_base;

/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;

@@ -150,13 +146,13 @@ static inline unsigned long kaslr_offset(void)
	return __kaslr_offset;
}

extern int is_full_image;

struct initrd_data {
	unsigned long start;
	unsigned long size;
};
extern struct initrd_data initrd_data;
extern int __kaslr_enabled;
static inline int kaslr_enabled(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return __kaslr_enabled;
	return 0;
}

struct oldmem_data {
	unsigned long start;

@@ -164,7 +160,7 @@ struct oldmem_data {
};
extern struct oldmem_data oldmem_data;

static inline u32 gen_lpswe(unsigned long addr)
static __always_inline u32 gen_lpswe(unsigned long addr)
{
	BUILD_BUG_ON(addr > 0xfff);
	return 0xb2b20000 | addr;

@@ -189,17 +189,53 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
	(rettype)r2;							\
})

#define call_on_stack_noreturn(fn, stack)				\
/*
 * Use call_nodat() to call a function with DAT disabled.
 * Proper sign and zero extension of function arguments is done.
 * Usage:
 *
 * rc = call_nodat(nr, rettype, fn, t1, a1, t2, a2, ...)
 *
 * - nr specifies the number of function arguments of fn.
 * - fn is the function to be called, where fn is a physical address.
 * - rettype is the return type of fn.
 * - t1, a1, ... are pairs, where t1 must match the type of the first
 *   argument of fn, t2 the second, etc. a1 is the corresponding
 *   first function argument (not name), etc.
 *
 * fn() is called with standard C function call ABI, with the exception
 * that no useful stackframe or stackpointer is passed via register 15.
 * Therefore the called function must not use r15 to access the stack.
 */
#define call_nodat(nr, rettype, fn, ...)				\
({									\
	void (*__fn)(void) = fn;					\
	rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn);		\
	/* aligned since psw_leave must not cross page boundary */	\
	psw_t __aligned(16) psw_leave;					\
	psw_t psw_enter;						\
	CALL_LARGS_##nr(__VA_ARGS__);					\
	CALL_REGS_##nr;							\
									\
	CALL_TYPECHECK_##nr(__VA_ARGS__);				\
	psw_enter.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;		\
	psw_enter.addr = (unsigned long)__fn;				\
	asm volatile(							\
		"	la	15,0(%[_stack])\n"			\
		"	xc	%[_bc](8,15),%[_bc](15)\n"		\
		"	brasl	14,%[_fn]\n"				\
		::[_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
		  [_stack] "a" (stack), [_fn] "X" (__fn));		\
	BUG();								\
		"	epsw	0,1\n"					\
		"	risbg	1,0,0,31,32\n"				\
		"	larl	7,1f\n"					\
		"	stg	1,%[psw_leave]\n"			\
		"	stg	7,8+%[psw_leave]\n"			\
		"	la	7,%[psw_leave]\n"			\
		"	lra	7,0(7)\n"				\
		"	larl	1,0f\n"					\
		"	lra	14,0(1)\n"				\
		"	lpswe	%[psw_enter]\n"				\
		"0:	lpswe	0(7)\n"					\
		"1:\n"							\
		: CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave)		\
		: [psw_enter] "Q" (psw_enter)				\
		: "7", CALL_CLOBBER_##nr);				\
	(rettype)r2;							\
})

#endif /* _ASM_S390_STACKTRACE_H */
||||
|
|
|
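The comment block above documents the call_nodat() contract; a minimal usage sketch, mirroring how the kexec/kdump rework later in this series invokes purgatory code (the wrapper function is illustrative):

	typedef int (*purgatory_t)(int);

	static int example_run_purgatory(struct kimage *image, int checksum_only)
	{
		/* image->start is a physical entry point with prototype int fn(int) */
		purgatory_t purgatory = (purgatory_t)image->start;

		/* one argument, return type int, argument pair (int, checksum_only) */
		return call_nodat(1, int, purgatory, int, checksum_only);
	}
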
@@ -55,18 +55,6 @@ char *strstr(const char *s1, const char *s2);

 #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

-extern void *__memcpy(void *dest, const void *src, size_t n);
-extern void *__memset(void *s, int c, size_t n);
-extern void *__memmove(void *dest, const void *src, size_t n);
-
-/*
- * For files that are not instrumented (e.g. mm/slub.c) we
- * should use not instrumented version of mem* functions.
- */
-
-#define memcpy(dst, src, len) __memcpy(dst, src, len)
-#define memmove(dst, src, len) __memmove(dst, src, len)
-#define memset(s, c, n) __memset(s, c, n)
 #define strlen(s) __strlen(s)

 #define __no_sanitize_prefix_strfunc(x) __##x
@@ -79,6 +67,9 @@ extern void *__memmove(void *dest, const void *src, size_t n);
 #define __no_sanitize_prefix_strfunc(x) x
 #endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */

+void *__memcpy(void *dest, const void *src, size_t n);
+void *__memset(void *s, int c, size_t n);
+void *__memmove(void *dest, const void *src, size_t n);
 void *__memset16(uint16_t *s, uint16_t v, size_t count);
 void *__memset32(uint32_t *s, uint32_t v, size_t count);
 void *__memset64(uint64_t *s, uint64_t v, size_t count);

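With the override macros gone, instrumented and uninstrumented copies coexist explicitly; a short sketch of the resulting convention (illustrative, not from this diff):

	#include <linux/string.h>

	static void example_copies(void *dst, const void *src, size_t len)
	{
		memcpy(dst, src, len);		/* KASAN-instrumented under CONFIG_KASAN */
		__memcpy(dst, src, len);	/* plain variant, never instrumented */
	}
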
@@ -9,6 +9,9 @@
 #define _ASM_THREAD_INFO_H

 #include <linux/bits.h>
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif

 /*
  * General size of kernel stacks
@@ -21,13 +24,12 @@
 #define BOOT_STACK_SIZE (PAGE_SIZE << 2)
 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)

+#define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
 #ifndef __ASSEMBLY__
 #include <asm/lowcore.h>
 #include <asm/page.h>

-#define STACK_INIT_OFFSET \
-	(THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
-
 /*
  * low level task data that entry.S needs immediate access to
  * - this struct should fit entirely inside of one cache line
@@ -70,7 +72,6 @@ void arch_setup_new_exec(void);
 #define TIF_PATCH_PENDING	5	/* pending live patching update */
 #define TIF_PGSTE		6	/* New mm's will use 4K page tables */
 #define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
-#define TIF_ISOLATE_BP		8	/* Run process with isolated BP */
 #define TIF_ISOLATE_BP_GUEST	9	/* Run KVM guests with isolated BP */
 #define TIF_PER_TRAP		10	/* Need to handle PER trap on exit to usermode */

@@ -94,7 +95,6 @@ void arch_setup_new_exec(void);
 #define _TIF_UPROBE		BIT(TIF_UPROBE)
 #define _TIF_GUARDED_STORAGE	BIT(TIF_GUARDED_STORAGE)
 #define _TIF_PATCH_PENDING	BIT(TIF_PATCH_PENDING)
-#define _TIF_ISOLATE_BP		BIT(TIF_ISOLATE_BP)
 #define _TIF_ISOLATE_BP_GUEST	BIT(TIF_ISOLATE_BP_GUEST)
 #define _TIF_PER_TRAP		BIT(TIF_PER_TRAP)

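Moving STACK_INIT_OFFSET out of the #ifndef __ASSEMBLY__ block and expressing it via __PT_SIZE from asm-offsets makes the same constant usable from C and assembly alike. A sketch of what it denotes (wrapper illustrative):

	/* The initial stack pointer sits one stack frame plus one pt_regs below the stack top. */
	static unsigned long example_initial_sp(struct task_struct *task)
	{
		return (unsigned long)task_stack_page(task) + STACK_INIT_OFFSET;
	}
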
@@ -60,7 +60,7 @@ typedef struct {
 	 * except of floats, and long long (32 bit)
 	 *
 	 */
-	long args[0];
+	long args[];
 } debug_sprintf_entry_t;

 /* internal function prototyes */
@@ -981,16 +981,6 @@ static struct ctl_table s390dbf_table[] = {
 	{ }
 };

-static struct ctl_table s390dbf_dir_table[] = {
-	{
-		.procname	= "s390dbf",
-		.maxlen		= 0,
-		.mode		= S_IRUGO | S_IXUGO,
-		.child		= s390dbf_table,
-	},
-	{ }
-};
-
 static struct ctl_table_header *s390dbf_sysctl_header;

 /**
@@ -1574,7 +1564,7 @@ out:
  */
 static int __init debug_init(void)
 {
-	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
+	s390dbf_sysctl_header = register_sysctl("s390dbf", s390dbf_table);
 	mutex_lock(&debug_mutex);
 	debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
 	initialized = 1;

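The debug_init() change is an instance of the one-level sysctl conversion: the wrapper "directory" table is dropped and the directory name is passed straight to register_sysctl(). A generic sketch of the pattern (function name illustrative):

	/* Before: a one-entry dir_table with .child = table, registered via
	 * register_sysctl_table(dir_table). After: */
	static struct ctl_table_header *example_header;

	static int __init example_sysctl_init(void)
	{
		example_header = register_sysctl("s390dbf", s390dbf_table);
		return example_header ? 0 : -ENOMEM;
	}
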
@@ -41,60 +41,50 @@ const char *stack_type_name(enum stack_type type)
 EXPORT_SYMBOL_GPL(stack_type_name);

 static inline bool in_stack(unsigned long sp, struct stack_info *info,
-			    enum stack_type type, unsigned long low,
-			    unsigned long high)
+			    enum stack_type type, unsigned long stack)
 {
-	if (sp < low || sp >= high)
+	if (sp < stack || sp >= stack + THREAD_SIZE)
 		return false;
 	info->type = type;
-	info->begin = low;
-	info->end = high;
+	info->begin = stack;
+	info->end = stack + THREAD_SIZE;
 	return true;
 }

 static bool in_task_stack(unsigned long sp, struct task_struct *task,
 			  struct stack_info *info)
 {
-	unsigned long stack;
+	unsigned long stack = (unsigned long)task_stack_page(task);

-	stack = (unsigned long) task_stack_page(task);
-	return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
+	return in_stack(sp, info, STACK_TYPE_TASK, stack);
 }

 static bool in_irq_stack(unsigned long sp, struct stack_info *info)
 {
-	unsigned long frame_size, top;
+	unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET;

-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	top = S390_lowcore.async_stack + frame_size;
-	return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
+	return in_stack(sp, info, STACK_TYPE_IRQ, stack);
 }

 static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
 {
-	unsigned long frame_size, top;
+	unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET;

-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	top = S390_lowcore.nodat_stack + frame_size;
-	return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
+	return in_stack(sp, info, STACK_TYPE_NODAT, stack);
 }

 static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
 {
-	unsigned long frame_size, top;
+	unsigned long stack = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;

-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	top = S390_lowcore.mcck_stack + frame_size;
-	return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
+	return in_stack(sp, info, STACK_TYPE_MCCK, stack);
 }

 static bool in_restart_stack(unsigned long sp, struct stack_info *info)
 {
-	unsigned long frame_size, top;
+	unsigned long stack = S390_lowcore.restart_stack - STACK_INIT_OFFSET;

-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	top = S390_lowcore.restart_stack + frame_size;
-	return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
+	return in_stack(sp, info, STACK_TYPE_RESTART, stack);
 }

 int get_stack_info(unsigned long sp, struct task_struct *task,
@@ -152,7 +142,13 @@ void show_stack(struct task_struct *task, unsigned long *stack,
 static void show_last_breaking_event(struct pt_regs *regs)
 {
 	printk("Last Breaking-Event-Address:\n");
-	printk(" [<%016lx>] %pSR\n", regs->last_break, (void *)regs->last_break);
+	printk(" [<%016lx>] ", regs->last_break);
+	if (user_mode(regs)) {
+		print_vma_addr(KERN_CONT, regs->last_break);
+		pr_cont("\n");
+	} else {
+		pr_cont("%pSR\n", (void *)regs->last_break);
+	}
 }

 void show_registers(struct pt_regs *regs)

@@ -34,8 +34,6 @@
 #include <asm/switch_to.h>
 #include "entry.h"

-int __bootdata(is_full_image);
-
 #define decompressor_handled_param(param)			\
 static int __init ignore_decompressor_param_##param(char *s)	\
 {								\
@@ -53,6 +51,14 @@ decompressor_handled_param(nokaslr);
 decompressor_handled_param(prot_virt);
 #endif

+static void __init kasan_early_init(void)
+{
+#ifdef CONFIG_KASAN
+	init_task.kasan_depth = 0;
+	sclp_early_printk("KernelAddressSanitizer initialized\n");
+#endif
+}
+
 static void __init reset_tod_clock(void)
 {
 	union tod_clock clk;
@@ -288,17 +294,6 @@ static void __init setup_boot_command_line(void)
 	strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
 }

-static void __init check_image_bootable(void)
-{
-	if (is_full_image)
-		return;
-
-	sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
-	sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
-	sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
-	disabled_wait();
-}
-
 static void __init sort_amode31_extable(void)
 {
 	sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
@@ -306,8 +301,8 @@ static void __init sort_amode31_extable(void)

 void __init startup_init(void)
 {
+	kasan_early_init();
 	reset_tod_clock();
-	check_image_bootable();
 	time_early_init();
 	init_kernel_storage_key();
 	lockdep_off();

@@ -7,7 +7,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>

-ENTRY(early_pgm_check_handler)
+SYM_CODE_START(early_pgm_check_handler)
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
@@ -20,4 +20,4 @@ ENTRY(early_pgm_check_handler)
 	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
 	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
 	lpswe	__LC_RETURN_PSW
-ENDPROC(early_pgm_check_handler)
+SYM_CODE_END(early_pgm_check_handler)

@@ -29,10 +29,6 @@
 #include <asm/export.h>
 #include <asm/nospec-insn.h>

-STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
-STACK_SIZE  = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-
 _LPP_OFFSET	= __LC_LPP

 	.macro STBEAR address
@@ -53,7 +49,7 @@ _LPP_OFFSET = __LC_LPP

 	.macro	CHECK_STACK savearea
 #ifdef CONFIG_CHECK_STACK
-	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
 	lghi	%r14,\savearea
 	jz	stack_overflow
 #endif
@@ -62,8 +58,8 @@ _LPP_OFFSET = __LC_LPP
 	.macro	CHECK_VMAP_STACK savearea,oklabel
 #ifdef CONFIG_VMAP_STACK
 	lgr	%r14,%r15
-	nill	%r14,0x10000 - STACK_SIZE
-	oill	%r14,STACK_INIT
+	nill	%r14,0x10000 - THREAD_SIZE
+	oill	%r14,STACK_INIT_OFFSET
 	clg	%r14,__LC_KERNEL_STACK
 	je	\oklabel
 	clg	%r14,__LC_ASYNC_STACK
@@ -154,26 +150,26 @@ _LPP_OFFSET = __LC_LPP
 	.endm
 #endif

+	.macro STACKLEAK_ERASE
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+	brasl	%r14,stackleak_erase_on_task_stack
+#endif
+	.endm
+
 	GEN_BR_THUNK %r14

 	.section .kprobes.text, "ax"
 .Ldummy:
 	/*
-	 * This nop exists only in order to avoid that __bpon starts at
-	 * the beginning of the kprobes text section. In that case we would
-	 * have several symbols at the same address. E.g. objdump would take
-	 * an arbitrary symbol name when disassembling this code.
-	 * With the added nop in between the __bpon symbol is unique
-	 * again.
+	 * The following nop exists only in order to avoid that the next
+	 * symbol starts at the beginning of the kprobes text section.
+	 * In that case there would be several symbols at the same address.
+	 * E.g. objdump would take an arbitrary symbol when disassembling
+	 * the code.
+	 * With the added nop in between this cannot happen.
 	 */
 	nop	0

-ENTRY(__bpon)
-	.globl __bpon
-	BPON
-	BR_EX	%r14
-ENDPROC(__bpon)
-
 /*
  * Scheduler resume function, called by switch_to
  * gpr2 = (task_struct *) prev
@@ -181,11 +177,11 @@ ENDPROC(__bpon)
  * Returns:
  * gpr2 = prev
  */
-ENTRY(__switch_to)
+SYM_FUNC_START(__switch_to)
 	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 	lghi	%r4,__TASK_stack
 	lghi	%r1,__TASK_thread
-	llill	%r5,STACK_INIT
+	llill	%r5,STACK_INIT_OFFSET
 	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
 	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
 	agr	%r15,%r5			# end of kernel stack of next
@@ -197,7 +193,7 @@ ENTRY(__switch_to)
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
 	BR_EX	%r14
-ENDPROC(__switch_to)
+SYM_FUNC_END(__switch_to)

 #if IS_ENABLED(CONFIG_KVM)
 /*
@@ -206,7 +202,7 @@ ENDPROC(__switch_to)
  * %r3 pointer to sie control block virt
  * %r4 guest register save area
  */
-ENTRY(__sie64a)
+SYM_FUNC_START(__sie64a)
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 	lg	%r12,__LC_CURRENT
 	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
@@ -227,7 +223,7 @@ ENTRY(__sie64a)
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	jo	.Lsie_skip			# exit if fp/vx regs changed
 	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
-	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
 .Lsie_entry:
 	sie	0(%r14)
 # Let the next instruction be NOP to avoid triggering a machine check
@@ -235,7 +231,7 @@ ENTRY(__sie64a)
 	nopr	7
 .Lsie_leave:
 	BPOFF
-	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
 .Lsie_skip:
 	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
@@ -252,8 +248,7 @@ ENTRY(__sie64a)
 	nopr	7
 .Lrewind_pad2:
 	nopr	7
-	.globl sie_exit
-sie_exit:
+SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
 	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
 	xgr	%r0,%r0				# clear guest registers to
@@ -273,7 +268,7 @@ sie_exit:
 	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
 	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
-ENDPROC(__sie64a)
+SYM_FUNC_END(__sie64a)
 EXPORT_SYMBOL(__sie64a)
 EXPORT_SYMBOL(sie_exit)
 #endif
@@ -283,7 +278,7 @@ EXPORT_SYMBOL(sie_exit)
 * are entered with interrupts disabled.
 */

-ENTRY(system_call)
+SYM_CODE_START(system_call)
 	stpt	__LC_SYS_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	BPOFF
@@ -291,11 +286,9 @@ ENTRY(system_call)
 .Lsysc_per:
 	STBEAR	__LC_LAST_BREAK
 	lctlg	%c1,%c1,__LC_KERNEL_ASCE
-	lg	%r12,__LC_CURRENT
 	lg	%r15,__LC_KERNEL_STACK
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
-	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
 	# clear user controlled register to prevent speculative use
 	xgr	%r0,%r0
 	xgr	%r1,%r1
@@ -312,39 +305,40 @@ ENTRY(system_call)
 	MBEAR	%r2
 	lgr	%r3,%r14
 	brasl	%r14,__do_syscall
+	STACKLEAK_ERASE
 	lctlg	%c1,%c1,__LC_USER_ASCE
 	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
-	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
+	BPON
 	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
 	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
 	stpt	__LC_EXIT_TIMER
 	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
-ENDPROC(system_call)
+SYM_CODE_END(system_call)

 #
 # a new process exits the kernel with ret_from_fork
 #
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
 	lgr	%r3,%r11
 	brasl	%r14,__ret_from_fork
+	STACKLEAK_ERASE
 	lctlg	%c1,%c1,__LC_USER_ASCE
 	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
-	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
+	BPON
 	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
 	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
 	stpt	__LC_EXIT_TIMER
 	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
-ENDPROC(ret_from_fork)
+SYM_CODE_END(ret_from_fork)

 /*
  * Program check handler routine
  */

-ENTRY(pgm_check_handler)
+SYM_CODE_START(pgm_check_handler)
 	stpt	__LC_SYS_ENTER_TIMER
 	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
-	lg	%r12,__LC_CURRENT
 	lghi	%r10,0
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 	tmhh	%r8,0x0001		# coming from user space?
@@ -355,6 +349,7 @@ ENTRY(pgm_check_handler)
 #if IS_ENABLED(CONFIG_KVM)
 	# cleanup critical section for program checks in __sie64a
 	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
+	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
 	SIEEXIT
 	lghi	%r10,_PIF_GUEST_FAULT
 #endif
@@ -366,8 +361,7 @@ ENTRY(pgm_check_handler)
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	# CHECK_VMAP_STACK branches to stack_overflow or 4f
 	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
-3:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
-	lg	%r15,__LC_KERNEL_STACK
+3:	lg	%r15,__LC_KERNEL_STACK
 4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stg	%r10,__PT_FLAGS(%r11)
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
@@ -388,8 +382,9 @@ ENTRY(pgm_check_handler)
 	brasl	%r14,__do_pgm_check
 	tmhh	%r8,0x0001		# returning to user space?
 	jno	.Lpgm_exit_kernel
+	STACKLEAK_ERASE
 	lctlg	%c1,%c1,__LC_USER_ASCE
-	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
+	BPON
 	stpt	__LC_EXIT_TIMER
 .Lpgm_exit_kernel:
 	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
@@ -407,32 +402,30 @@ ENTRY(pgm_check_handler)
 	lghi	%r14,1
 	LBEAR	__LC_PGM_LAST_BREAK
 	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
-ENDPROC(pgm_check_handler)
+SYM_CODE_END(pgm_check_handler)

 /*
  * Interrupt handler macro used for external and IO interrupts.
  */
 .macro INT_HANDLER name,lc_old_psw,handler
-ENTRY(\name)
+SYM_CODE_START(\name)
 	stckf	__LC_INT_CLOCK
 	stpt	__LC_SYS_ENTER_TIMER
 	STBEAR	__LC_LAST_BREAK
 	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
-	lg	%r12,__LC_CURRENT
 	lmg	%r8,%r9,\lc_old_psw
 	tmhh	%r8,0x0001			# interrupting from user ?
 	jnz	1f
 #if IS_ENABLED(CONFIG_KVM)
 	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
-	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
 	SIEEXIT
 #endif
 0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	2f
-1:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
-	lctlg	%c1,%c1,__LC_KERNEL_ASCE
+1:	lctlg	%c1,%c1,__LC_KERNEL_ASCE
 	lg	%r15,__LC_KERNEL_STACK
 2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
@@ -455,13 +448,14 @@ ENTRY(\name)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
 	tmhh	%r8,0x0001		# returning to user ?
 	jno	2f
+	STACKLEAK_ERASE
 	lctlg	%c1,%c1,__LC_USER_ASCE
-	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
+	BPON
 	stpt	__LC_EXIT_TIMER
 2:	LBEAR	__PT_LAST_BREAK(%r11)
 	lmg	%r0,%r15,__PT_R0(%r11)
 	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
-ENDPROC(\name)
+SYM_CODE_END(\name)
 .endm

 INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
@@ -470,7 +464,7 @@ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
 /*
  * Load idle PSW.
  */
-ENTRY(psw_idle)
+SYM_FUNC_START(psw_idle)
 	stg	%r14,(__SF_GPRS+8*8)(%r15)
 	stg	%r3,__SF_EMPTY(%r15)
 	larl	%r1,psw_idle_exit
@@ -486,29 +480,26 @@ ENTRY(psw_idle)
 	stckf	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
 	lpswe	__SF_EMPTY(%r15)
-.globl psw_idle_exit
-psw_idle_exit:
+SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL)
 	BR_EX	%r14
-ENDPROC(psw_idle)
+SYM_FUNC_END(psw_idle)

 /*
  * Machine check handler routines
  */
-ENTRY(mcck_int_handler)
+SYM_CODE_START(mcck_int_handler)
 	stckf	__LC_MCCK_CLOCK
 	BPOFF
 	la	%r1,4095		# validate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
 	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)	# validate bear
-	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
 	lg	%r12,__LC_CURRENT
+	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA		# validate gprs
 	lmg	%r8,%r9,__LC_MCK_OLD_PSW
 	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
 	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
 	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
 	jno	.Lmcck_panic		# control registers invalid -> panic
-	la	%r14,4095
-	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
+	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA		# validate ctl regs
 	ptlb
 	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
@@ -530,16 +521,13 @@ ENTRY(mcck_int_handler)
 	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
 	jno	.Lmcck_panic
 #if IS_ENABLED(CONFIG_KVM)
-	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_user
+	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
 	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
 	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-4:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
 	SIEEXIT
-	j	.Lmcck_stack
 #endif
-.Lmcck_user:
-	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
 .Lmcck_stack:
 	lg	%r15,__LC_MCCK_STACK
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stctg	%c1,%c1,__PT_CR1(%r11)
@@ -567,7 +555,7 @@ ENTRY(mcck_int_handler)
 	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
-	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
+	BPON
 	stpt	__LC_EXIT_TIMER
 0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
 	LBEAR	0(%r12)
@@ -583,10 +571,10 @@ ENTRY(mcck_int_handler)
 */
 	lhi	%r5,0
 	lhi	%r6,1
-	larl	%r7,.Lstop_lock
+	larl	%r7,stop_lock
 	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
 	jnz	4f
-	larl	%r7,.Lthis_cpu
+	larl	%r7,this_cpu
 	stap	0(%r7)			# this CPU address
 	lh	%r4,0(%r7)
 	nilh	%r4,0
@@ -602,16 +590,15 @@ ENTRY(mcck_int_handler)
 3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
 	brc	SIGP_CC_BUSY,3b
 4:	j	4b
-ENDPROC(mcck_int_handler)
+SYM_CODE_END(mcck_int_handler)

-ENTRY(restart_int_handler)
+SYM_CODE_START(restart_int_handler)
 	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
 	stg	%r15,__LC_SAVE_AREA_RESTART
 	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
 	jz	0f
-	la	%r15,4095
-	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
-0:	larl	%r15,.Lstosm_tmp
+	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
+0:	larl	%r15,stosm_tmp
 	stosm	0(%r15),0x04		# turn dat on, keep irqs off
 	lg	%r15,__LC_RESTART_STACK
 	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
@@ -632,7 +619,7 @@ ENTRY(restart_int_handler)
 2:	sigp	%r4,%r3,SIGP_STOP	# sigp stop to current cpu
 	brc	2,2b
 3:	j	3b
-ENDPROC(restart_int_handler)
+SYM_CODE_END(restart_int_handler)

 	.section .kprobes.text, "ax"

@@ -642,7 +629,7 @@ ENDPROC(restart_int_handler)
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
-ENTRY(stack_overflow)
+SYM_CODE_START(stack_overflow)
 	lg	%r15,__LC_NODAT_STACK	# change to panic stack
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
@@ -652,26 +639,27 @@ ENTRY(stack_overflow)
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	jg	kernel_stack_overflow
-ENDPROC(stack_overflow)
+SYM_CODE_END(stack_overflow)
 #endif

 	.section .data, "aw"
-	.align	4
-.Lstop_lock:	.long	0
-.Lthis_cpu:	.short	0
-.Lstosm_tmp:	.byte	0
+	.balign	4
+SYM_DATA_LOCAL(stop_lock,	.long 0)
+SYM_DATA_LOCAL(this_cpu,	.short 0)
+SYM_DATA_LOCAL(stosm_tmp,	.byte 0)

 	.section .rodata, "a"
 #define SYSCALL(esame,emu)	.quad __s390x_ ## esame
-	.globl	sys_call_table
-sys_call_table:
+SYM_DATA_START(sys_call_table)
 #include "asm/syscall_table.h"
+SYM_DATA_END(sys_call_table)
 #undef SYSCALL

 #ifdef CONFIG_COMPAT

 #define SYSCALL(esame,emu)	.quad __s390_ ## emu
-	.globl	sys_call_table_emu
-sys_call_table_emu:
+SYM_DATA_START(sys_call_table_emu)
 #include "asm/syscall_table.h"
+SYM_DATA_END(sys_call_table_emu)
 #undef SYSCALL
 #endif

@@ -49,26 +49,6 @@ struct ftrace_insn {
 	s32 disp;
 } __packed;

-asm(
-	"	.align 16\n"
-	"ftrace_shared_hotpatch_trampoline_br:\n"
-	"	lmg	%r0,%r1,2(%r1)\n"
-	"	br	%r1\n"
-	"ftrace_shared_hotpatch_trampoline_br_end:\n"
-);
-
-#ifdef CONFIG_EXPOLINE
-asm(
-	"	.align 16\n"
-	"ftrace_shared_hotpatch_trampoline_exrl:\n"
-	"	lmg	%r0,%r1,2(%r1)\n"
-	"	exrl	%r0,0f\n"
-	"	j	.\n"
-	"0:	br	%r1\n"
-	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
-);
-#endif /* CONFIG_EXPOLINE */
-
 #ifdef CONFIG_MODULES
 static char *ftrace_plt;
 #endif /* CONFIG_MODULES */
@@ -246,7 +226,7 @@ static int __init ftrace_plt_init(void)

 	start = ftrace_shared_hotpatch_trampoline(&end);
 	memcpy(ftrace_plt, start, end - start);
-	set_memory_ro((unsigned long)ftrace_plt, 1);
+	set_memory_rox((unsigned long)ftrace_plt, 1);
 	return 0;
 }
 device_initcall(ftrace_plt_init);

@@ -16,7 +16,7 @@
 #include <asm/ptrace.h>

 __HEAD
-ENTRY(startup_continue)
+SYM_CODE_START(startup_continue)
 	larl	%r1,tod_clock_base
 	mvc	0(16,%r1),__LC_BOOT_CLOCK
 #
@@ -24,19 +24,17 @@ ENTRY(startup_continue)
 #
 	larl	%r14,init_task
 	stg	%r14,__LC_CURRENT
-	larl	%r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE
+	larl	%r15,init_thread_union+STACK_INIT_OFFSET
 	stg	%r15,__LC_KERNEL_STACK
 	brasl	%r14,sclp_early_adjust_va	# allow sclp_early_printk
-#ifdef CONFIG_KASAN
-	brasl	%r14,kasan_early_init
-#endif
 	brasl	%r14,startup_init		# s390 specific early init
 	brasl	%r14,start_kernel		# common init code
 #
 # We returned from start_kernel ?!? PANIK
 #
 	basr	%r13,0
-	lpswe	.Ldw-.(%r13)		# load disabled wait psw
+	lpswe	dw_psw-.(%r13)		# load disabled wait psw
+SYM_CODE_END(startup_continue)

 	.align	16
-.LPG1:
-.Ldw:	.quad	0x0002000180000000,0x0000000000000000
+SYM_DATA_LOCAL(dw_psw,	.quad 0x0002000180000000,0x0000000000000000)

@@ -176,11 +176,11 @@ static bool reipl_fcp_clear;
 static bool reipl_ccw_clear;
 static bool reipl_eckd_clear;

-static inline int __diag308(unsigned long subcode, void *addr)
+static inline int __diag308(unsigned long subcode, unsigned long addr)
 {
 	union register_pair r1;

-	r1.even = (unsigned long) addr;
+	r1.even = addr;
 	r1.odd	= 0;
 	asm volatile(
 		"	diag	%[r1],%[subcode],0x308\n"
@@ -195,7 +195,7 @@ static inline int __diag308(unsigned long subcode, void *addr)
 int diag308(unsigned long subcode, void *addr)
 {
 	diag_stat_inc(DIAG_STAT_X308);
-	return __diag308(subcode, addr);
+	return __diag308(subcode, addr ? virt_to_phys(addr) : 0);
 }
 EXPORT_SYMBOL_GPL(diag308);

@@ -649,7 +649,6 @@ static struct kset *ipl_kset;

 static void __ipl_run(void *unused)
 {
-	__bpon();
 	diag308(DIAG308_LOAD_CLEAR, NULL);
 }

@@ -41,7 +41,7 @@ void *alloc_insn_page(void)
 	page = module_alloc(PAGE_SIZE);
 	if (!page)
 		return NULL;
-	__set_memory((unsigned long) page, 1, SET_MEMORY_RO | SET_MEMORY_X);
+	set_memory_rox((unsigned long)page, 1);
 	return page;
 }

@@ -14,9 +14,9 @@
 */
 	.section .kprobes.text, "ax"
 	.align 4096
-ENTRY(kprobes_insn_page)
+SYM_CODE_START(kprobes_insn_page)
 	.rept 2048
 	.word 0x07fe
 	.endr
-ENDPROC(kprobes_insn_page)
+SYM_CODE_END(kprobes_insn_page)
 	.previous

@@ -29,8 +29,8 @@
 #include <asm/nmi.h>
 #include <asm/sclp.h>

-typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long,
-				  unsigned long);
+typedef void (*relocate_kernel_t)(unsigned long, unsigned long, unsigned long);
+typedef int (*purgatory_t)(int);

 extern const unsigned char relocate_kernel[];
 extern const unsigned long long relocate_kernel_len;
@@ -41,11 +41,14 @@ extern const unsigned long long relocate_kernel_len;
 * Reset the system, copy boot CPU registers to absolute zero,
 * and jump to the kdump image
 */
-static void __do_machine_kdump(void *image)
+static void __do_machine_kdump(void *data)
 {
-	int (*start_kdump)(int);
+	struct kimage *image = data;
+	purgatory_t purgatory;
 	unsigned long prefix;

+	purgatory = (purgatory_t)image->start;
+
 	/* store_status() saved the prefix register to lowcore */
 	prefix = (unsigned long) S390_lowcore.prefixreg_save_area;

@@ -58,13 +61,11 @@ static void __do_machine_kdump(void *image)
 	 * prefix register of this CPU to zero
 	 */
 	memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
-	       (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);
+	       phys_to_virt(prefix + __LC_FPREGS_SAVE_AREA), 512);

 	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
-	start_kdump = (void *)((struct kimage *) image)->start;
-	start_kdump(1);
+	call_nodat(1, int, purgatory, int, 1);

-	/* Die if start_kdump returns */
+	/* Die if kdump returns */
 	disabled_wait();
 }

@@ -111,18 +112,6 @@ static noinline void __machine_kdump(void *image)
 	store_status(__do_machine_kdump, image);
 }

-static unsigned long do_start_kdump(unsigned long addr)
-{
-	struct kimage *image = (struct kimage *) addr;
-	int (*start_kdump)(int) = (void *)image->start;
-	int rc;
-
-	__arch_local_irq_stnsm(0xfb); /* disable DAT */
-	rc = start_kdump(0);
-	__arch_local_irq_stosm(0x04); /* enable DAT */
-	return rc;
-}
-
 #endif /* CONFIG_CRASH_DUMP */

 /*
@@ -131,12 +120,10 @@ static unsigned long do_start_kdump(unsigned long addr)
 static bool kdump_csum_valid(struct kimage *image)
 {
 #ifdef CONFIG_CRASH_DUMP
+	purgatory_t purgatory = (purgatory_t)image->start;
 	int rc;

 	preempt_disable();
-	rc = call_on_stack(1, S390_lowcore.nodat_stack, unsigned long, do_start_kdump,
-			   unsigned long, (unsigned long)image);
+	rc = call_nodat(1, int, purgatory, int, 0);
 	preempt_enable();
 	return rc == 0;
 #else
 	return false;
@@ -210,7 +197,7 @@ int machine_kexec_prepare(struct kimage *image)
 		return -EINVAL;

 	/* Get the destination where the assembler code should be copied to.*/
-	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);
+	reboot_code_buffer = page_to_virt(image->control_code_page);

 	/* Then copy it */
 	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
@@ -250,19 +237,20 @@ void machine_crash_shutdown(struct pt_regs *regs)
 */
 static void __do_machine_kexec(void *data)
 {
-	unsigned long diag308_subcode;
-	relocate_kernel_t data_mover;
+	unsigned long data_mover, entry, diag308_subcode;
 	struct kimage *image = data;

-	s390_reset_system();
-	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
-
-	__arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
-	/* Call the moving routine */
+	data_mover = page_to_phys(image->control_code_page);
+	entry = virt_to_phys(&image->head);
 	diag308_subcode = DIAG308_CLEAR_RESET;
 	if (sclp.has_iplcc)
 		diag308_subcode |= DIAG308_FLAG_EI;
-	(*data_mover)(&image->head, image->start, diag308_subcode);
+	s390_reset_system();
+
+	call_nodat(3, void, (relocate_kernel_t)data_mover,
+		   unsigned long, entry,
+		   unsigned long, image->start,
+		   unsigned long, diag308_subcode);

 	/* Die if kexec returns */
 	disabled_wait();

@@ -28,9 +28,9 @@

 	.section .kprobes.text, "ax"

-ENTRY(ftrace_stub)
+SYM_FUNC_START(ftrace_stub)
 	BR_EX	%r14
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)

 SYM_CODE_START(ftrace_stub_direct_tramp)
 	lgr	%r1, %r0
@@ -140,10 +140,25 @@ SYM_FUNC_END(return_to_handler)
 #endif
 #endif /* CONFIG_FUNCTION_TRACER */

+SYM_CODE_START(ftrace_shared_hotpatch_trampoline_br)
+	lmg	%r0,%r1,2(%r1)
+	br	%r1
+SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_br_end, SYM_L_GLOBAL)
+SYM_CODE_END(ftrace_shared_hotpatch_trampoline_br)
+
+#ifdef CONFIG_EXPOLINE
+SYM_CODE_START(ftrace_shared_hotpatch_trampoline_exrl)
+	lmg	%r0,%r1,2(%r1)
+	exrl	%r0,0f
+	j	.
+0:	br	%r1
+SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_exrl_end, SYM_L_GLOBAL)
+SYM_CODE_END(ftrace_shared_hotpatch_trampoline_exrl)
+#endif /* CONFIG_EXPOLINE */
+
 #ifdef CONFIG_RETHOOK

-SYM_FUNC_START(arch_rethook_trampoline)
-
+SYM_CODE_START(arch_rethook_trampoline)
 	stg	%r14,(__SF_GPRS+8*8)(%r15)
 	lay	%r15,-STACK_FRAME_SIZE(%r15)
 	stmg	%r0,%r14,STACK_PTREGS_GPRS(%r15)
@@ -166,7 +181,6 @@ SYM_FUNC_START(arch_rethook_trampoline)
 	mvc	__SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15)
 	lmg	%r0,%r15,STACK_PTREGS_GPRS(%r15)
 	lpswe	__SF_EMPTY(%r15)
-
-SYM_FUNC_END(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)

 #endif /* CONFIG_RETHOOK */

@@ -26,6 +26,7 @@
 #include <asm/facility.h>
 #include <asm/ftrace.lds.h>
 #include <asm/set_memory.h>
+#include <asm/setup.h>

 #if 0
 #define DEBUGP printk
@@ -35,6 +36,24 @@

 #define PLT_ENTRY_SIZE 22

+static unsigned long get_module_load_offset(void)
+{
+	static DEFINE_MUTEX(module_kaslr_mutex);
+	static unsigned long module_load_offset;
+
+	if (!kaslr_enabled())
+		return 0;
+	/*
+	 * Calculate the module_load_offset the first time this code
+	 * is called. Once calculated it stays the same until reboot.
+	 */
+	mutex_lock(&module_kaslr_mutex);
+	if (!module_load_offset)
+		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
+	mutex_unlock(&module_kaslr_mutex);
+	return module_load_offset;
+}
+
 void *module_alloc(unsigned long size)
 {
 	gfp_t gfp_mask = GFP_KERNEL;
@@ -42,9 +61,11 @@ void *module_alloc(unsigned long size)

 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
-				 gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
-				 __builtin_return_address(0));
+	p = __vmalloc_node_range(size, MODULE_ALIGN,
+				 MODULES_VADDR + get_module_load_offset(),
+				 MODULES_END, gfp_mask, PAGE_KERNEL,
+				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
+				 NUMA_NO_NODE, __builtin_return_address(0));
 	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
 		vfree(p);
 		return NULL;
@@ -491,7 +512,7 @@ static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
 	start = module_alloc(numpages * PAGE_SIZE);
 	if (!start)
 		return -ENOMEM;
-	set_memory_ro((unsigned long)start, numpages);
+	set_memory_rox((unsigned long)start, numpages);
 	end = start + size;

 	me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;

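module_alloc() now maps with PAGE_KERNEL (non-executable) and VM_FLUSH_RESET_PERMS, so every consumer flips its region to read-only plus executable only after populating it. A sketch of the resulting allocate-copy-protect pattern, as used by the kprobes and ftrace trampoline paths in this series (the wrapper is illustrative):

	static void *example_alloc_exec(const void *code, unsigned long npages)
	{
		void *p = module_alloc(npages * PAGE_SIZE);

		if (!p)
			return NULL;
		memcpy(p, code, npages * PAGE_SIZE);		/* populate while writable */
		set_memory_rox((unsigned long)p, npages);	/* then seal: RO + X */
		return p;
	}
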
@@ -76,7 +76,6 @@ static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
 }

 struct cpu_cf_events {
-	struct cpumf_ctr_info	info;
 	atomic_t		ctr_set[CPUMF_CTR_SET_MAX];
 	u64			state;		/* For perf_event_open SVC */
 	u64			dev_state;	/* For /dev/hwctr */
@@ -95,6 +94,15 @@ static DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
 static unsigned int cfdiag_cpu_speed;	/* CPU speed for CF_DIAG trailer */
 static debug_info_t *cf_dbg;

+/*
+ * The CPU Measurement query counter information instruction contains
+ * information which varies per machine generation, but is constant and
+ * does not change when running on a particular machine, such as counter
+ * first and second version number. This is needed to determine the size
+ * of counter sets. Extract this information at device driver initialization.
+ */
+static struct cpumf_ctr_info	cpumf_ctr_info;
+
 #define	CF_DIAG_CTRSET_DEF	0xfeef	/* Counter set header mark */
 					/* interval in seconds */

@@ -167,11 +175,10 @@ struct cf_trailer_entry {	/* CPU-M CF_DIAG trailer (64 byte) */
 /* Create the trailer data at the end of a page. */
 static void cfdiag_trailer(struct cf_trailer_entry *te)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	struct cpuid cpuid;

-	te->cfvn = cpuhw->info.cfvn;		/* Counter version numbers */
-	te->csvn = cpuhw->info.csvn;
+	te->cfvn = cpumf_ctr_info.cfvn;		/* Counter version numbers */
+	te->csvn = cpumf_ctr_info.csvn;

 	get_cpu_id(&cpuid);			/* Machine type */
 	te->mach_type = cpuid.machine;
@@ -184,50 +191,60 @@ static void cfdiag_trailer(struct cf_trailer_entry *te)
 }

 /*
- * Return the maximum possible counter set size (in number of 8 byte counters)
- * depending on type and model number.
+ * The number of counters per counter set varies between machine generations,
+ * but is constant when running on a particular machine generation.
+ * Determine each counter set size at device driver initialization and
+ * retrieve it later.
 */
-static size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
-				  struct cpumf_ctr_info *info)
+static size_t cpumf_ctr_setsizes[CPUMF_CTR_SET_MAX];
+static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
 {
 	size_t ctrset_size = 0;

 	switch (ctrset) {
 	case CPUMF_CTR_SET_BASIC:
-		if (info->cfvn >= 1)
+		if (cpumf_ctr_info.cfvn >= 1)
 			ctrset_size = 6;
 		break;
 	case CPUMF_CTR_SET_USER:
-		if (info->cfvn == 1)
+		if (cpumf_ctr_info.cfvn == 1)
 			ctrset_size = 6;
-		else if (info->cfvn >= 3)
+		else if (cpumf_ctr_info.cfvn >= 3)
 			ctrset_size = 2;
 		break;
 	case CPUMF_CTR_SET_CRYPTO:
-		if (info->csvn >= 1 && info->csvn <= 5)
+		if (cpumf_ctr_info.csvn >= 1 && cpumf_ctr_info.csvn <= 5)
 			ctrset_size = 16;
-		else if (info->csvn == 6 || info->csvn == 7)
+		else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
 			ctrset_size = 20;
 		break;
 	case CPUMF_CTR_SET_EXT:
-		if (info->csvn == 1)
+		if (cpumf_ctr_info.csvn == 1)
 			ctrset_size = 32;
-		else if (info->csvn == 2)
+		else if (cpumf_ctr_info.csvn == 2)
 			ctrset_size = 48;
-		else if (info->csvn >= 3 && info->csvn <= 5)
+		else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
 			ctrset_size = 128;
-		else if (info->csvn == 6 || info->csvn == 7)
+		else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
 			ctrset_size = 160;
 		break;
 	case CPUMF_CTR_SET_MT_DIAG:
-		if (info->csvn > 3)
+		if (cpumf_ctr_info.csvn > 3)
 			ctrset_size = 48;
 		break;
 	case CPUMF_CTR_SET_MAX:
 		break;
 	}
+	cpumf_ctr_setsizes[ctrset] = ctrset_size;
+}

-	return ctrset_size;
+/*
+ * Return the maximum possible counter set size (in number of 8 byte counters)
+ * depending on type and model number.
+ */
+static size_t cpum_cf_read_setsize(enum cpumf_ctr_set ctrset)
+{
+	return cpumf_ctr_setsizes[ctrset];
 }

 /* Read a counter set. The counter set number determines the counter set and
@@ -248,14 +265,13 @@ static size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
 static size_t cfdiag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
 			       size_t room, bool error_ok)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	size_t ctrset_size, need = 0;
 	int rc = 3;				/* Assume write failure */

 	ctrdata->def = CF_DIAG_CTRSET_DEF;
 	ctrdata->set = ctrset;
 	ctrdata->res1 = 0;
-	ctrset_size = cpum_cf_ctrset_size(ctrset, &cpuhw->info);
+	ctrset_size = cpum_cf_read_setsize(ctrset);

 	if (ctrset_size) {			/* Save data */
 		need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
@@ -269,10 +285,6 @@ static size_t cfdiag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
 		need = 0;
 	}

-	debug_sprintf_event(cf_dbg, 3,
-			    "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
-			    " need %zd rc %d\n", __func__, ctrset, ctrset_size,
-			    cpuhw->info.cfvn, cpuhw->info.csvn, need, rc);
 	return need;
 }

@@ -377,40 +389,35 @@ static enum cpumf_ctr_set get_counter_set(u64 event)
 	return set;
 }

-static int validate_ctr_version(const struct hw_perf_event *hwc,
-				enum cpumf_ctr_set set)
+static int validate_ctr_version(const u64 config, enum cpumf_ctr_set set)
 {
-	struct cpu_cf_events *cpuhw;
-	int err = 0;
 	u16 mtdiag_ctl;
-
-	cpuhw = &get_cpu_var(cpu_cf_events);
+	int err = 0;

 	/* check required version for counter sets */
 	switch (set) {
 	case CPUMF_CTR_SET_BASIC:
 	case CPUMF_CTR_SET_USER:
-		if (cpuhw->info.cfvn < 1)
+		if (cpumf_ctr_info.cfvn < 1)
 			err = -EOPNOTSUPP;
 		break;
 	case CPUMF_CTR_SET_CRYPTO:
-		if ((cpuhw->info.csvn >= 1 && cpuhw->info.csvn <= 5 &&
-		     hwc->config > 79) ||
-		    (cpuhw->info.csvn >= 6 && hwc->config > 83))
+		if ((cpumf_ctr_info.csvn >= 1 && cpumf_ctr_info.csvn <= 5 &&
+		     config > 79) || (cpumf_ctr_info.csvn >= 6 && config > 83))
 			err = -EOPNOTSUPP;
 		break;
 	case CPUMF_CTR_SET_EXT:
-		if (cpuhw->info.csvn < 1)
+		if (cpumf_ctr_info.csvn < 1)
 			err = -EOPNOTSUPP;
-		if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
-		    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
-		    (cpuhw->info.csvn >= 3 && cpuhw->info.csvn <= 5
-		     && hwc->config > 255) ||
-		    (cpuhw->info.csvn >= 6 && hwc->config > 287))
+		if ((cpumf_ctr_info.csvn == 1 && config > 159) ||
+		    (cpumf_ctr_info.csvn == 2 && config > 175) ||
+		    (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5 &&
+		     config > 255) ||
+		    (cpumf_ctr_info.csvn >= 6 && config > 287))
 			err = -EOPNOTSUPP;
 		break;
 	case CPUMF_CTR_SET_MT_DIAG:
-		if (cpuhw->info.csvn <= 3)
+		if (cpumf_ctr_info.csvn <= 3)
 			err = -EOPNOTSUPP;
 		/*
 		 * MT-diagnostic counters are read-only. The counter set
@@ -425,35 +432,15 @@ static int validate_ctr_version(const struct hw_perf_event *hwc,
 		 * counter set is enabled and active.
 		 */
 		mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
-		if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
-		      (cpuhw->info.enable_ctl & mtdiag_ctl) &&
-		      (cpuhw->info.act_ctl & mtdiag_ctl)))
+		if (!((cpumf_ctr_info.auth_ctl & mtdiag_ctl) &&
+		      (cpumf_ctr_info.enable_ctl & mtdiag_ctl) &&
+		      (cpumf_ctr_info.act_ctl & mtdiag_ctl)))
 			err = -EOPNOTSUPP;
 		break;
 	case CPUMF_CTR_SET_MAX:
 		err = -EOPNOTSUPP;
 	}

-	put_cpu_var(cpu_cf_events);
 	return err;
 }

-static int validate_ctr_auth(const struct hw_perf_event *hwc)
-{
-	struct cpu_cf_events *cpuhw;
-	int err = 0;
-
-	cpuhw = &get_cpu_var(cpu_cf_events);
-
-	/* Check authorization for cpu counter sets.
-	 * If the particular CPU counter set is not authorized,
-	 * return with -ENOENT in order to fall back to other
-	 * PMUs that might suffice the event request.
-	 */
-	if (!(hwc->config_base & cpuhw->info.auth_ctl))
-		err = -ENOENT;
-
-	put_cpu_var(cpu_cf_events);
-	return err;
-}
-
@@ -471,13 +458,10 @@ static void cpumf_pmu_enable(struct pmu *pmu)
 		return;

 	err = lcctl(cpuhw->state | cpuhw->dev_state);
-	if (err) {
-		pr_err("Enabling the performance measuring unit "
-		       "failed with rc=%x\n", err);
-		return;
-	}
-
-	cpuhw->flags |= PMU_F_ENABLED;
+	if (err)
+		pr_err("Enabling the performance measuring unit failed with rc=%x\n", err);
+	else
+		cpuhw->flags |= PMU_F_ENABLED;
 }

 /*
@@ -497,13 +481,10 @@ static void cpumf_pmu_disable(struct pmu *pmu)
 	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
 	inactive |= cpuhw->dev_state;
 	err = lcctl(inactive);
-	if (err) {
-		pr_err("Disabling the performance measuring unit "
-		       "failed with rc=%x\n", err);
-		return;
-	}
-
-	cpuhw->flags &= ~PMU_F_ENABLED;
+	if (err)
+		pr_err("Disabling the performance measuring unit failed with rc=%x\n", err);
+	else
+		cpuhw->flags &= ~PMU_F_ENABLED;
 }

 #define PMC_INIT	0UL
@@ -515,8 +496,6 @@ static void cpum_cf_setup_cpu(void *flags)

 	switch ((unsigned long)flags) {
 	case PMC_INIT:
-		memset(&cpuhw->info, 0, sizeof(cpuhw->info));
-		qctri(&cpuhw->info);
 		cpuhw->flags |= PMU_F_RESERVED;
 		break;

@@ -602,7 +581,6 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
 	struct perf_event_attr *attr = &event->attr;
 	struct hw_perf_event *hwc = &event->hw;
 	enum cpumf_ctr_set set;
-	int err = 0;
 	u64 ev;

 	switch (type) {
@@ -678,12 +656,15 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
 	cpumf_hw_inuse();
 	event->destroy = hw_perf_event_destroy;

-	/* Finally, validate version and authorization of the counter set */
-	err = validate_ctr_auth(hwc);
-	if (!err)
-		err = validate_ctr_version(hwc, set);
-
-	return err;
+	/*
+	 * Finally, validate version and authorization of the counter set.
+	 * If the particular CPU counter set is not authorized,
+	 * return with -ENOENT in order to fall back to other
+	 * PMUs that might suffice the event request.
+	 */
+	if (!(hwc->config_base & cpumf_ctr_info.auth_ctl))
+		return -ENOENT;
+	return validate_ctr_version(hwc->config, set);
 }

 /* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different
@@ -983,7 +964,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,

 	/* counter authorization change alert */
 	if (alert & CPU_MF_INT_CF_CACA)
-		qctri(&cpuhw->info);
+		qctri(&cpumf_ctr_info);

 	/* loss of counter data alert */
 	if (alert & CPU_MF_INT_CF_LCDA)
@@ -1000,9 +981,14 @@ static int __init cpumf_pmu_init(void)
 {
 	int rc;

-	if (!cpum_cf_avail())
+	/* Extract counter measurement facility information */
+	if (!cpum_cf_avail() || qctri(&cpumf_ctr_info))
 		return -ENODEV;

+	/* Determine and store counter set sizes for later reference */
+	for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
+		cpum_cf_make_setsize(rc);
+
 	/*
 	 * Clear bit 15 of cr0 to unauthorize problem-state to
 	 * extract measurement counters
@@ -1269,28 +1255,26 @@ static int cfset_all_start(struct cfset_request *req)
 */
 static size_t cfset_needspace(unsigned int sets)
 {
-	struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
 	size_t bytes = 0;
 	int i;

 	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
 		if (!(sets & cpumf_ctr_ctl[i]))
 			continue;
-		bytes += cpum_cf_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
+		bytes += cpum_cf_read_setsize(i) * sizeof(u64) +
 			 sizeof(((struct s390_ctrset_setdata *)0)->set) +
 			 sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
 	}
 	bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
 		(bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
 		     sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
-	put_cpu_ptr(&cpu_cf_events);
 	return bytes;
 }

 static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
 {
 	struct s390_ctrset_read __user *ctrset_read;
-	unsigned int cpu, cpus, rc;
+	unsigned int cpu, cpus, rc = 0;
 	void __user *uptr;

 	ctrset_read = (struct s390_ctrset_read __user *)arg;
@@ -1304,17 +1288,20 @@ static int cfset_all_copy(unsigned long arg, cpumask_t *mask)
 		rc |= put_user(cpuhw->sets, &ctrset_cpudata->no_sets);
 		rc |= copy_to_user(ctrset_cpudata->data, cpuhw->data,
 				   cpuhw->used);
-		if (rc)
-			return -EFAULT;
+		if (rc) {
+			rc = -EFAULT;
+			goto out;
+		}
 		uptr += sizeof(struct s390_ctrset_cpudata) + cpuhw->used;
 		cond_resched();
 	}
 	cpus = cpumask_weight(mask);
 	if (put_user(cpus, &ctrset_read->no_cpus))
-		return -EFAULT;
-	debug_sprintf_event(cf_dbg, 4, "%s copied %ld\n", __func__,
+		rc = -EFAULT;
+out:
+	debug_sprintf_event(cf_dbg, 4, "%s rc %d copied %ld\n", __func__, rc,
 			    uptr - (void __user *)ctrset_read->data);
-	return 0;
+	return rc;
 }

 static size_t cfset_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
@@ -1354,7 +1341,7 @@ static void cfset_cpu_read(void *parm)

 		if (!(p->sets & cpumf_ctr_ctl[set]))
 			continue;	/* Counter set not in list */
-		set_size = cpum_cf_ctrset_size(set, &cpuhw->info);
+		set_size = cpum_cf_read_setsize(set);
 		space = sizeof(cpuhw->data) - cpuhw->used;
 		space = cfset_cpuset_read(sp, set, set_size, space);
 		if (space) {
@@ -1385,14 +1372,10 @@ static int cfset_all_read(unsigned long arg, struct cfset_request *req)

 static long cfset_ioctl_read(unsigned long arg, struct cfset_request *req)
 {
-	struct s390_ctrset_read read;
 	int ret = -ENODATA;

-	if (req && req->ctrset) {
-		if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
-			return -EFAULT;
+	if (req && req->ctrset)
 		ret = cfset_all_read(arg, req);
-	}
 	return ret;
 }

@@ -1569,16 +1552,13 @@ static void cfdiag_read(struct perf_event *event)

 static int get_authctrsets(void)
 {
-	struct cpu_cf_events *cpuhw;
 	unsigned long auth = 0;
 	enum cpumf_ctr_set i;

-	cpuhw = &get_cpu_var(cpu_cf_events);
 	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
-		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
+		if (cpumf_ctr_info.auth_ctl & cpumf_ctr_ctl[i])
 			auth |= cpumf_ctr_ctl[i];
 	}
-	put_cpu_var(cpu_cf_events);
 	return auth;
 }

@@ -1716,7 +1696,7 @@ static size_t cfdiag_maxsize(struct cpumf_ctr_info *info)
 	enum cpumf_ctr_set i;

 	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
-		size_t size = cpum_cf_ctrset_size(i, info);
+		size_t size = cpum_cf_read_setsize(i);

 		if (size)
 			max_size += size * sizeof(u64) +
@@ -1750,16 +1730,12 @@ static void cfdiag_get_cpu_speed(void)

 static int cfset_init(void)
 {
-	struct cpumf_ctr_info info;
 	size_t need;
 	int rc;

-	if (qctri(&info))
-		return -ENODEV;
-
 	cfdiag_get_cpu_speed();
 	/* Make sure the counter set data fits into predefined buffer. */
-	need = cfdiag_maxsize(&info);
+	need = cfdiag_maxsize(&cpumf_ctr_info);
 	if (need > sizeof(((struct cpu_cf_events *)0)->start)) {
 		pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
 		       need);

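The counter facility rework follows a query-once/read-many scheme: qctri() runs a single time at driver init, the per-set sizes are cached, and all later paths read the cache instead of taking a per-CPU variable. A condensed sketch of the scheme (illustrative; the real code lives in cpumf_pmu_init() above):

	enum cpumf_ctr_set set;

	if (qctri(&cpumf_ctr_info))			/* query counter info once */
		return -ENODEV;
	for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set)
		cpum_cf_make_setsize(set);		/* cache each set size */
	/* ... later, hot paths just read the cache: */
	/* size_t words = cpum_cf_read_setsize(CPUMF_CTR_SET_BASIC); */
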
@@ -882,10 +882,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
 	}

-	/* Check and set other sampling flags */
-	if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
-		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
-
 	err = __hw_perf_event_init_rate(event, &si);
 	if (err)
 		goto out;
@@ -1293,11 +1289,8 @@ static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t
 * The sampling buffer position are retrieved and saved in the TEAR_REG
 * register of the specified perf event.
 *
- * Only full sample-data-blocks are processed. Specify the flash_all flag
- * to also walk through partially filled sample-data-blocks. It is ignored
- * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag
- * enforces the processing of full sample-data-blocks only (trailer entries
- * with the block-full-indicator bit set).
+ * Only full sample-data-blocks are processed. Specify the flush_all flag
+ * to also walk through partially filled sample-data-blocks.
 */
 static void hw_perf_event_update(struct perf_event *event, int flush_all)
 {
@@ -1315,9 +1308,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
 	if (SAMPL_DIAG_MODE(&event->hw))
 		return;

-	if (flush_all && SDB_FULL_BLOCKS(hwc))
-		flush_all = 0;
-
 	sdbt = (unsigned long *) TEAR_REG(hwc);
 	done = event_overflow = sampl_overflow = num_sdb = 0;
 	while (!done) {

@@ -136,12 +136,12 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	p->thread.last_break = 1;
 
 	frame->sf.back_chain = 0;
-	frame->sf.gprs[5] = (unsigned long)frame + sizeof(struct stack_frame);
-	frame->sf.gprs[6] = (unsigned long)p;
+	frame->sf.gprs[11 - 6] = (unsigned long)&frame->childregs;
+	frame->sf.gprs[12 - 6] = (unsigned long)p;
 	/* new return point is ret_from_fork */
-	frame->sf.gprs[8] = (unsigned long)ret_from_fork;
+	frame->sf.gprs[14 - 6] = (unsigned long)ret_from_fork;
 	/* fake return stack for resume(), don't go back to schedule */
-	frame->sf.gprs[9] = (unsigned long)frame;
+	frame->sf.gprs[15 - 6] = (unsigned long)frame;
 
 	/* Store access registers to kernel stack of new process. */
 	if (unlikely(args->fn)) {
@@ -149,8 +149,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		memset(&frame->childregs, 0, sizeof(struct pt_regs));
 		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO |
 				PSW_MASK_EXT | PSW_MASK_MCHECK;
-		frame->childregs.psw.addr =
-				(unsigned long)__ret_from_fork;
 		frame->childregs.gprs[9] = (unsigned long)args->fn;
 		frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
 		frame->childregs.orig_gpr2 = -1;

@@ -364,21 +364,3 @@ const struct seq_operations cpuinfo_op = {
 	.stop	= c_stop,
 	.show	= show_cpuinfo,
 };
-
-int s390_isolate_bp(void)
-{
-	if (!test_facility(82))
-		return -EOPNOTSUPP;
-	set_thread_flag(TIF_ISOLATE_BP);
-	return 0;
-}
-EXPORT_SYMBOL(s390_isolate_bp);
-
-int s390_isolate_bp_guest(void)
-{
-	if (!test_facility(82))
-		return -EOPNOTSUPP;
-	set_thread_flag(TIF_ISOLATE_BP_GUEST);
-	return 0;
-}
-EXPORT_SYMBOL(s390_isolate_bp_guest);

@@ -19,7 +19,7 @@
 # r2 = Function to be called after store status
 # r3 = Parameter for function
 #
-ENTRY(store_status)
+SYM_CODE_START(store_status)
 	/* Save register one and load save area base */
 	stg	%r1,__LC_SAVE_AREA_RESTART
 	/* General purpose registers */
@@ -61,7 +61,7 @@ ENTRY(store_status)
 	stpx	0(%r1)
 	/* Clock comparator - seven bytes */
 	lghi	%r1,__LC_CLOCK_COMP_SAVE_AREA
-	larl	%r4,.Lclkcmp
+	larl	%r4,clkcmp
 	stckc	0(%r4)
 	mvc	1(7,%r1),1(%r4)
 	/* Program status word */
@@ -73,9 +73,9 @@ ENTRY(store_status)
 	lgr	%r9,%r2
 	lgr	%r2,%r3
 	BR_EX	%r9
-ENDPROC(store_status)
+SYM_CODE_END(store_status)
 
 	.section .bss
-	.align	8
-.Lclkcmp:	.quad	0x0000000000000000
+	.balign	8
+SYM_DATA_LOCAL(clkcmp, .quad 0x0000000000000000)
 	.previous

@@ -26,53 +26,51 @@
  */
 
 	.text
-ENTRY(relocate_kernel)
-	basr	%r13,0		# base address
-.base:
-	lghi	%r7,PAGE_SIZE	# load PAGE_SIZE in r7
-	lghi	%r9,PAGE_SIZE	# load PAGE_SIZE in r9
-	lg	%r5,0(%r2)	# read another word for indirection page
-	aghi	%r2,8		# increment pointer
-	tml	%r5,0x1		# is it a destination page?
-	je	.indir_check	# NO, goto "indir_check"
-	lgr	%r6,%r5		# r6 = r5
-	nill	%r6,0xf000	# mask it out and...
-	j	.base		# ...next iteration
-.indir_check:
-	tml	%r5,0x2		# is it a indirection page?
-	je	.done_test	# NO, goto "done_test"
-	nill	%r5,0xf000	# YES, mask out,
-	lgr	%r2,%r5		# move it into the right register,
-	j	.base		# and read next...
-.done_test:
-	tml	%r5,0x4		# is it the done indicator?
-	je	.source_test	# NO! Well, then it should be the source indicator...
-	j	.done		# ok, lets finish it here...
-.source_test:
-	tml	%r5,0x8		# it should be a source indicator...
-	je	.base		# NO, ignore it...
-	lgr	%r8,%r5		# r8 = r5
-	nill	%r8,0xf000	# masking
-0:	mvcle	%r6,%r8,0x0	# copy PAGE_SIZE bytes from r8 to r6 - pad with 0
-	jo	0b
-	j	.base
-.done:
-	lgr	%r0,%r4		# subcode
-	cghi	%r3,0
-	je	.diag
-	la	%r4,load_psw-.base(%r13)	# load psw-address into the register
-	o	%r3,4(%r4)	# or load address into psw
-	st	%r3,4(%r4)
-	mvc	0(8,%r0),0(%r4)	# copy psw to absolute address 0
-.diag:
-	diag	%r0,%r0,0x308
-ENDPROC(relocate_kernel)
+SYM_CODE_START(relocate_kernel)
+	basr	%r13,0		# base address
+.base:
+	lghi	%r7,PAGE_SIZE	# load PAGE_SIZE in r7
+	lghi	%r9,PAGE_SIZE	# load PAGE_SIZE in r9
+	lg	%r5,0(%r2)	# read another word for indirection page
+	aghi	%r2,8		# increment pointer
+	tml	%r5,0x1		# is it a destination page?
+	je	.indir_check	# NO, goto "indir_check"
+	lgr	%r6,%r5		# r6 = r5
+	nill	%r6,0xf000	# mask it out and...
+	j	.base		# ...next iteration
+.indir_check:
+	tml	%r5,0x2		# is it a indirection page?
+	je	.done_test	# NO, goto "done_test"
+	nill	%r5,0xf000	# YES, mask out,
+	lgr	%r2,%r5		# move it into the right register,
+	j	.base		# and read next...
+.done_test:
+	tml	%r5,0x4		# is it the done indicator?
+	je	.source_test	# NO! Well, then it should be the source indicator...
+	j	.done		# ok, lets finish it here...
+.source_test:
+	tml	%r5,0x8		# it should be a source indicator...
+	je	.base		# NO, ignore it...
+	lgr	%r8,%r5		# r8 = r5
+	nill	%r8,0xf000	# masking
+0:	mvcle	%r6,%r8,0x0	# copy PAGE_SIZE bytes from r8 to r6 - pad with 0
+	jo	0b
+	j	.base
+.done:
+	lgr	%r0,%r4		# subcode
+	cghi	%r3,0
+	je	.diag
+	la	%r4,load_psw-.base(%r13)	# load psw-address into the register
+	o	%r3,4(%r4)	# or load address into psw
+	st	%r3,4(%r4)
+	mvc	0(8,%r0),0(%r4)	# copy psw to absolute address 0
+.diag:
+	diag	%r0,%r0,0x308
+SYM_CODE_END(relocate_kernel)
 
-	.align	8
-load_psw:
-	.long	0x00080000,0x80000000
-relocate_kernel_end:
-	.align	8
-	.globl	relocate_kernel_len
-relocate_kernel_len:
-	.quad	relocate_kernel_end - relocate_kernel
+	.balign	8
+SYM_DATA_START_LOCAL(load_psw)
+	.long	0x00080000,0x80000000
+SYM_DATA_END_LABEL(load_psw, SYM_L_LOCAL, relocate_kernel_end)
+	.balign	8
+SYM_DATA(relocate_kernel_len, .quad relocate_kernel_end - relocate_kernel)

@@ -74,7 +74,7 @@
 #include <asm/numa.h>
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/maccess.h>
 #include <asm/uv.h>
 #include <asm/asm-offsets.h>
@@ -147,14 +147,10 @@ static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
 
 int __bootdata(noexec_disabled);
 unsigned long __bootdata(ident_map_size);
-struct mem_detect_info __bootdata(mem_detect);
-struct initrd_data __bootdata(initrd_data);
-unsigned long __bootdata(pgalloc_pos);
-unsigned long __bootdata(pgalloc_end);
-unsigned long __bootdata(pgalloc_low);
+struct physmem_info __bootdata(physmem_info);
 
 unsigned long __bootdata_preserved(__kaslr_offset);
-unsigned long __bootdata(__amode31_base);
+int __bootdata_preserved(__kaslr_enabled);
 unsigned int __bootdata_preserved(zlib_dfltcc_support);
 EXPORT_SYMBOL(zlib_dfltcc_support);
 u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -385,39 +381,27 @@ void stack_free(unsigned long stack)
 #endif
 }
 
-int __init arch_early_irq_init(void)
-{
-	unsigned long stack;
-
-	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-	if (!stack)
-		panic("Couldn't allocate async stack");
-	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
-	return 0;
-}
-
 void __init __noreturn arch_call_rest_init(void)
 {
+	smp_reinit_ipl_cpu();
+	rest_init();
+}
+
+static unsigned long __init stack_alloc_early(void)
+{
 	unsigned long stack;
 
-	smp_reinit_ipl_cpu();
-	stack = stack_alloc();
-	if (!stack)
-		panic("Couldn't allocate kernel stack");
-	current->stack = (void *) stack;
-#ifdef CONFIG_VMAP_STACK
-	current->stack_vm_area = (void *) stack;
-#endif
-	set_task_stack_end_magic(current);
-	stack += STACK_INIT_OFFSET;
-	S390_lowcore.kernel_stack = stack;
-	call_on_stack_noreturn(rest_init, stack);
+	stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+	if (!stack) {
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, THREAD_SIZE, THREAD_SIZE);
+	}
+	return stack;
 }
 
 static void __init setup_lowcore(void)
 {
 	struct lowcore *lc, *abs_lc;
-	unsigned long mcck_stack;
 
 	/*
 	 * Setup lowcore for boot cpu
@@ -441,8 +425,6 @@ static void __init setup_lowcore(void)
 	lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 	lc->io_new_psw.addr = (unsigned long) io_int_handler;
 	lc->clock_comparator = clock_comparator_max;
-	lc->nodat_stack = ((unsigned long) &init_thread_union)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->current_task = (unsigned long)&init_task;
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
@@ -455,17 +437,15 @@ static void __init setup_lowcore(void)
 	lc->steal_timer = S390_lowcore.steal_timer;
 	lc->last_update_timer = S390_lowcore.last_update_timer;
 	lc->last_update_clock = S390_lowcore.last_update_clock;
 
 	/*
 	 * Allocate the global restart stack which is the same for
-	 * all CPUs in cast *one* of them does a PSW restart.
+	 * all CPUs in case *one* of them does a PSW restart.
 	 */
-	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
-	if (!restart_stack)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, THREAD_SIZE, THREAD_SIZE);
-	restart_stack += STACK_INIT_OFFSET;
-
+	restart_stack = (void *)(stack_alloc_early() + STACK_INIT_OFFSET);
+	lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+	lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+	lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+	lc->kernel_stack = S390_lowcore.kernel_stack;
 	/*
 	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
 	 * restart data to the absolute zero lowcore. This is necessary if
@@ -476,13 +456,6 @@ static void __init setup_lowcore(void)
 	lc->restart_data = 0;
 	lc->restart_source = -1U;
 	__ctl_store(lc->cregs_save_area, 0, 15);
-
-	mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
-	if (!mcck_stack)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, THREAD_SIZE, THREAD_SIZE);
-	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
-
 	lc->spinlock_lockval = arch_spin_lockval(0);
 	lc->spinlock_index = 0;
 	arch_spin_lock_setup(0);
@@ -635,7 +608,11 @@ static struct notifier_block kdump_mem_nb = {
  */
 static void __init reserve_pgtables(void)
 {
-	memblock_reserve(pgalloc_pos, pgalloc_end - pgalloc_pos);
+	unsigned long start, end;
+	struct reserved_range *range;
+
+	for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
+		memblock_reserve(start, end - start);
 }
 
 /*
@@ -712,13 +689,13 @@ static void __init reserve_crashkernel(void)
  */
 static void __init reserve_initrd(void)
 {
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (!initrd_data.start || !initrd_data.size)
+	unsigned long addr, size;
+
+	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || !get_physmem_reserved(RR_INITRD, &addr, &size))
 		return;
-	initrd_start = (unsigned long)__va(initrd_data.start);
-	initrd_end = initrd_start + initrd_data.size;
-	memblock_reserve(initrd_data.start, initrd_data.size);
-#endif
+	initrd_start = (unsigned long)__va(addr);
+	initrd_end = initrd_start + size;
+	memblock_reserve(addr, size);
 }
 
 /*
@@ -730,71 +707,39 @@ static void __init reserve_certificate_list(void)
 	memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
 }
 
-static void __init reserve_mem_detect_info(void)
+static void __init reserve_physmem_info(void)
 {
-	unsigned long start, size;
+	unsigned long addr, size;
 
-	get_mem_detect_reserved(&start, &size);
-	if (size)
-		memblock_reserve(start, size);
+	if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+		memblock_reserve(addr, size);
 }
 
-static void __init free_mem_detect_info(void)
+static void __init free_physmem_info(void)
 {
-	unsigned long start, size;
+	unsigned long addr, size;
 
-	get_mem_detect_reserved(&start, &size);
-	if (size)
-		memblock_phys_free(start, size);
+	if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+		memblock_phys_free(addr, size);
 }
 
-static const char * __init get_mem_info_source(void)
-{
-	switch (mem_detect.info_source) {
-	case MEM_DETECT_SCLP_STOR_INFO:
-		return "sclp storage info";
-	case MEM_DETECT_DIAG260:
-		return "diag260";
-	case MEM_DETECT_SCLP_READ_INFO:
-		return "sclp read info";
-	case MEM_DETECT_BIN_SEARCH:
-		return "binary search";
-	}
-	return "none";
-}
-
-static void __init memblock_add_mem_detect_info(void)
+static void __init memblock_add_physmem_info(void)
 {
 	unsigned long start, end;
 	int i;
 
 	pr_debug("physmem info source: %s (%hhd)\n",
-		 get_mem_info_source(), mem_detect.info_source);
+		 get_physmem_info_source(), physmem_info.info_source);
 	/* keep memblock lists close to the kernel */
 	memblock_set_bottom_up(true);
-	for_each_mem_detect_usable_block(i, &start, &end)
+	for_each_physmem_usable_range(i, &start, &end)
 		memblock_add(start, end - start);
-	for_each_mem_detect_block(i, &start, &end)
+	for_each_physmem_online_range(i, &start, &end)
 		memblock_physmem_add(start, end - start);
 	memblock_set_bottom_up(false);
 	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
 }
 
-/*
- * Check for initrd being in usable memory
- */
-static void __init check_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (initrd_data.start && initrd_data.size &&
-	    !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
-		pr_err("The initial RAM disk does not fit into the memory\n");
-		memblock_phys_free(initrd_data.start, initrd_data.size);
-		initrd_start = initrd_end = 0;
-	}
-#endif
-}
-
 /*
  * Reserve memory used for lowcore/command line/kernel image.
  */
@@ -803,7 +748,7 @@ static void __init reserve_kernel(void)
 	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
 	memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
 	memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
-	memblock_reserve(__amode31_base, __eamode31 - __samode31);
+	memblock_reserve(physmem_info.reserved[RR_AMODE31].start, __eamode31 - __samode31);
 	memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
 	memblock_reserve(__pa(_stext), _end - _stext);
 }
@@ -825,13 +770,13 @@ static void __init setup_memory(void)
 static void __init relocate_amode31_section(void)
 {
 	unsigned long amode31_size = __eamode31 - __samode31;
-	long amode31_offset = __amode31_base - __samode31;
+	long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31;
 	long *ptr;
 
 	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
 
 	/* Move original AMODE31 section to the new one */
-	memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
+	memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size);
 	/* Zero out the old AMODE31 section to catch invalid accesses within it */
 	memset((void *)__samode31, 0, amode31_size);
 
@@ -997,14 +942,14 @@ void __init setup_arch(char **cmdline_p)
 	reserve_kernel();
 	reserve_initrd();
 	reserve_certificate_list();
-	reserve_mem_detect_info();
+	reserve_physmem_info();
 	memblock_set_current_limit(ident_map_size);
 	memblock_allow_resize();
 
 	/* Get information about *all* installed memory */
-	memblock_add_mem_detect_info();
+	memblock_add_physmem_info();
 
-	free_mem_detect_info();
+	free_physmem_info();
 	setup_memory_end();
 	memblock_dump_all();
 	setup_memory();
@@ -1017,7 +962,6 @@ void __init setup_arch(char **cmdline_p)
 	if (MACHINE_HAS_EDAT2)
 		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 
-	check_initrd();
 	reserve_crashkernel();
 #ifdef CONFIG_CRASH_DUMP
 	/*

@@ -280,9 +280,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 
 	cpu = pcpu - pcpu_devices;
 	lc = lowcore_ptr[cpu];
-	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->current_task = (unsigned long) tsk;
+	lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
+	lc->current_task = (unsigned long)tsk;
 	lc->lpp = LPP_MAGIC;
 	lc->current_pid = tsk->pid;
 	lc->user_timer = tsk->thread.user_timer;
@@ -348,7 +347,6 @@ static void pcpu_delegate(struct pcpu *pcpu,
 		abs_lc->restart_source = source_cpu;
 		put_abs_lowcore(abs_lc);
 	}
-	__bpon();
 	asm volatile(
 		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
 		"	brc	2,0b	# busy, try again\n"
@@ -986,7 +984,6 @@ void __cpu_die(unsigned int cpu)
 void __noreturn cpu_die(void)
 {
 	idle_task_exit();
-	__bpon();
 	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
 	for (;;) ;
 }
@@ -1302,9 +1299,9 @@ int __init smp_reinit_ipl_cpu(void)
 	local_mcck_enable();
 	local_irq_restore(flags);
 
-	free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
 	memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
+	memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
+	memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
 	memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
 
 	return 0;
 }

@@ -449,7 +449,7 @@
 444  common	landlock_create_ruleset	sys_landlock_create_ruleset	sys_landlock_create_ruleset
 445  common	landlock_add_rule	sys_landlock_add_rule		sys_landlock_add_rule
 446  common	landlock_restrict_self	sys_landlock_restrict_self	sys_landlock_restrict_self
-# 447 reserved for memfd_secret
+447  common	memfd_secret		sys_memfd_secret		sys_memfd_secret
 448  common	process_mrelease	sys_process_mrelease		sys_process_mrelease
 449  common	futex_waitv		sys_futex_waitv			sys_futex_waitv
 450  common	set_mempolicy_home_node	sys_set_mempolicy_home_node	sys_set_mempolicy_home_node

@@ -27,7 +27,7 @@
 /*
  * int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
  */
-ENTRY(_diag14_amode31)
+SYM_FUNC_START(_diag14_amode31)
 	lgr	%r1,%r2
 	lgr	%r2,%r3
 	lgr	%r3,%r4
@@ -42,12 +42,12 @@ ENTRY(_diag14_amode31)
 	lgfr	%r2,%r5
 	BR_EX_AMODE31_r14
 	EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
-ENDPROC(_diag14_amode31)
+SYM_FUNC_END(_diag14_amode31)
 
 /*
  * int _diag210_amode31(struct diag210 *addr)
  */
-ENTRY(_diag210_amode31)
+SYM_FUNC_START(_diag210_amode31)
 	lgr	%r1,%r2
 	lhi	%r2,-1
 	sam31
@@ -60,12 +60,12 @@ ENTRY(_diag210_amode31)
 	lgfr	%r2,%r2
 	BR_EX_AMODE31_r14
 	EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
-ENDPROC(_diag210_amode31)
+SYM_FUNC_END(_diag210_amode31)
 
 /*
  * int diag8c(struct diag8c *addr, struct ccw_dev_id *devno, size_t len)
  */
-ENTRY(_diag8c_amode31)
+SYM_FUNC_START(_diag8c_amode31)
 	llgf	%r3,0(%r3)
 	sam31
 	diag	%r2,%r4,0x8c
@@ -74,11 +74,11 @@ ENTRY(_diag8c_amode31)
 	lgfr	%r2,%r3
 	BR_EX_AMODE31_r14
 	EX_TABLE_AMODE31(.Ldiag8c_ex, .Ldiag8c_ex)
-ENDPROC(_diag8c_amode31)
+SYM_FUNC_END(_diag8c_amode31)
 /*
  * int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
  */
-ENTRY(_diag26c_amode31)
+SYM_FUNC_START(_diag26c_amode31)
 	lghi	%r5,-EOPNOTSUPP
 	sam31
 	diag	%r2,%r4,0x26c
@@ -87,42 +87,42 @@ ENTRY(_diag26c_amode31)
 	lgfr	%r2,%r5
 	BR_EX_AMODE31_r14
 	EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
-ENDPROC(_diag26c_amode31)
+SYM_FUNC_END(_diag26c_amode31)
 
 /*
  * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
  */
-ENTRY(_diag0c_amode31)
+SYM_FUNC_START(_diag0c_amode31)
 	sam31
 	diag	%r2,%r2,0x0c
 	sam64
 	BR_EX_AMODE31_r14
-ENDPROC(_diag0c_amode31)
+SYM_FUNC_END(_diag0c_amode31)
 
 /*
  * void _diag308_reset_amode31(void)
  *
  * Calls diag 308 subcode 1 and continues execution
  */
-ENTRY(_diag308_reset_amode31)
-	larl	%r4,.Lctlregs		# Save control registers
+SYM_FUNC_START(_diag308_reset_amode31)
+	larl	%r4,ctlregs		# Save control registers
 	stctg	%c0,%c15,0(%r4)
 	lg	%r2,0(%r4)		# Disable lowcore protection
 	nilh	%r2,0xefff
-	larl	%r4,.Lctlreg0
+	larl	%r4,ctlreg0
 	stg	%r2,0(%r4)
 	lctlg	%c0,%c0,0(%r4)
-	larl	%r4,.Lfpctl		# Floating point control register
+	larl	%r4,fpctl		# Floating point control register
 	stfpc	0(%r4)
-	larl	%r4,.Lprefix		# Save prefix register
+	larl	%r4,prefix		# Save prefix register
 	stpx	0(%r4)
-	larl	%r4,.Lprefix_zero	# Set prefix register to 0
+	larl	%r4,prefix_zero		# Set prefix register to 0
 	spx	0(%r4)
-	larl	%r4,.Lcontinue_psw	# Save PSW flags
+	larl	%r4,continue_psw	# Save PSW flags
 	epsw	%r2,%r3
 	stm	%r2,%r3,0(%r4)
 	larl	%r4,.Lrestart_part2	# Setup restart PSW at absolute 0
-	larl	%r3,.Lrestart_diag308_psw
+	larl	%r3,restart_diag308_psw
 	og	%r4,0(%r3)		# Save PSW
 	lghi	%r3,0
 	sturg	%r4,%r3			# Use sturg, because of large pages
@@ -134,39 +134,26 @@ ENTRY(_diag308_reset_amode31)
 	lhi	%r1,2			# Use mode 2 = ESAME (dump)
 	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE	# Switch to ESAME mode
 	sam64				# Switch to 64 bit addressing mode
-	larl	%r4,.Lctlregs		# Restore control registers
+	larl	%r4,ctlregs		# Restore control registers
 	lctlg	%c0,%c15,0(%r4)
-	larl	%r4,.Lfpctl		# Restore floating point ctl register
+	larl	%r4,fpctl		# Restore floating point ctl register
 	lfpc	0(%r4)
-	larl	%r4,.Lprefix		# Restore prefix register
+	larl	%r4,prefix		# Restore prefix register
 	spx	0(%r4)
-	larl	%r4,.Lcontinue_psw	# Restore PSW flags
+	larl	%r4,continue_psw	# Restore PSW flags
 	larl	%r2,.Lcontinue
 	stg	%r2,8(%r4)
 	lpswe	0(%r4)
 .Lcontinue:
 	BR_EX_AMODE31_r14
-ENDPROC(_diag308_reset_amode31)
+SYM_FUNC_END(_diag308_reset_amode31)
 
 	.section .amode31.data,"aw",@progbits
-	.align	8
-.Lrestart_diag308_psw:
-	.long	0x00080000,0x80000000
-
-	.align	8
-.Lcontinue_psw:
-	.quad	0,0
-
-	.align	8
-.Lctlreg0:
-	.quad	0
-.Lctlregs:
-	.rept	16
-	.quad	0
-	.endr
-.Lfpctl:
-	.long	0
-.Lprefix:
-	.long	0
-.Lprefix_zero:
-	.long	0
+	.balign	8
+SYM_DATA_LOCAL(restart_diag308_psw, .long 0x00080000,0x80000000)
+SYM_DATA_LOCAL(continue_psw,	    .quad 0,0)
+SYM_DATA_LOCAL(ctlreg0,		    .quad 0)
+SYM_DATA_LOCAL(ctlregs,		    .fill 16,8,0)
+SYM_DATA_LOCAL(fpctl,		    .long 0)
+SYM_DATA_LOCAL(prefix,		    .long 0)
+SYM_DATA_LOCAL(prefix_zero,	    .long 0)

@@ -637,16 +637,6 @@ static struct ctl_table topology_ctl_table[] = {
 	{ },
 };
 
-static struct ctl_table topology_dir_table[] = {
-	{
-		.procname	= "s390",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= topology_ctl_table,
-	},
-	{ },
-};
-
 static int __init topology_init(void)
 {
 	struct device *dev_root;
@@ -657,7 +647,7 @@ static int __init topology_init(void)
 		set_topology_timer();
 	else
 		topology_update_polarization_simple();
-	register_sysctl_table(topology_dir_table);
+	register_sysctl("s390", topology_ctl_table);
 
 	dev_root = bus_get_dev_root(&cpu_subsys);
 	if (dev_root) {

@@ -1,12 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 
+#include <linux/linkage.h>
 #include <asm/unistd.h>
 #include <asm/dwarf.h>
 
 .macro vdso_syscall func,syscall
 	.globl __kernel_compat_\func
 	.type  __kernel_compat_\func,@function
-	.align 8
+	__ALIGN
 __kernel_compat_\func:
 	CFI_STARTPROC
 	svc	\syscall

@@ -1,4 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
 #include <asm/vdso.h>
 #include <asm/unistd.h>
 #include <asm/asm-offsets.h>
@@ -16,7 +17,7 @@
 .macro vdso_func func
 	.globl __kernel_\func
 	.type  __kernel_\func,@function
-	.align 8
+	__ALIGN
 __kernel_\func:
 	CFI_STARTPROC
 	aghi	%r15,-WRAPPER_FRAME_SIZE
@@ -41,7 +42,7 @@ vdso_func getcpu
 .macro vdso_syscall func,syscall
 	.globl __kernel_\func
 	.type  __kernel_\func,@function
-	.align 8
+	__ALIGN
 __kernel_\func:
 	CFI_STARTPROC
 	svc	\syscall

@@ -14,6 +14,8 @@
 #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) \
 			   *(.bss..invalid_pg_dir)
 
+#define RO_EXCEPTION_TABLE_ALIGN	16
+
 /* Handle ro_after_init data on our own. */
 #define RO_AFTER_INIT_DATA
 
@@ -66,7 +68,6 @@ SECTIONS
 		*(.data..ro_after_init)
 		JUMP_TABLE_DATA
 	} :data
-	EXCEPTION_TABLE(16)
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;
 
@@ -219,6 +220,13 @@ SECTIONS
 		QUAD(init_mm)
 		QUAD(swapper_pg_dir)
 		QUAD(invalid_pg_dir)
+#ifdef CONFIG_KASAN
+		QUAD(kasan_early_shadow_page)
+		QUAD(kasan_early_shadow_pte)
+		QUAD(kasan_early_shadow_pmd)
+		QUAD(kasan_early_shadow_pud)
+		QUAD(kasan_early_shadow_p4d)
+#endif
 	} :NONE
 
 	/* Debugging sections.	*/

@@ -14,8 +14,7 @@
 /*
  * void *memmove(void *dest, const void *src, size_t n)
  */
-WEAK(memmove)
-ENTRY(__memmove)
+SYM_FUNC_START(__memmove)
 	ltgr	%r4,%r4
 	lgr	%r1,%r2
 	jz	.Lmemmove_exit
@@ -48,7 +47,10 @@ ENTRY(__memmove)
 	BR_EX	%r14
 .Lmemmove_mvc:
 	mvc	0(1,%r1),0(%r3)
-ENDPROC(__memmove)
+SYM_FUNC_END(__memmove)
 EXPORT_SYMBOL(__memmove)
+
+SYM_FUNC_ALIAS(memmove, __memmove)
 EXPORT_SYMBOL(memmove)
 
 /*
@@ -66,8 +68,7 @@ EXPORT_SYMBOL(memmove)
  *	return __builtin_memset(s, c, n);
  * }
  */
-WEAK(memset)
-ENTRY(__memset)
+SYM_FUNC_START(__memset)
 	ltgr	%r4,%r4
 	jz	.Lmemset_exit
 	ltgr	%r3,%r3
@@ -111,7 +112,10 @@ ENTRY(__memset)
 	xc	0(1,%r1),0(%r1)
 .Lmemset_mvc:
 	mvc	1(1,%r1),0(%r1)
-ENDPROC(__memset)
+SYM_FUNC_END(__memset)
 EXPORT_SYMBOL(__memset)
+
+SYM_FUNC_ALIAS(memset, __memset)
 EXPORT_SYMBOL(memset)
 
 /*
@@ -119,8 +123,7 @@ EXPORT_SYMBOL(memset)
  *
  * void *memcpy(void *dest, const void *src, size_t n)
  */
-WEAK(memcpy)
-ENTRY(__memcpy)
+SYM_FUNC_START(__memcpy)
 	ltgr	%r4,%r4
 	jz	.Lmemcpy_exit
 	aghi	%r4,-1
@@ -141,7 +144,10 @@ ENTRY(__memcpy)
 	j	.Lmemcpy_remainder
 .Lmemcpy_mvc:
 	mvc	0(1,%r1),0(%r3)
-ENDPROC(__memcpy)
+SYM_FUNC_END(__memcpy)
 EXPORT_SYMBOL(__memcpy)
+
+SYM_FUNC_ALIAS(memcpy, __memcpy)
 EXPORT_SYMBOL(memcpy)
 
 /*
@@ -152,7 +158,7 @@ EXPORT_SYMBOL(memcpy)
  * void *__memset64(uint64_t *s, uint64_t v, size_t count)
  */
 .macro __MEMSET bits,bytes,insn
-ENTRY(__memset\bits)
+SYM_FUNC_START(__memset\bits)
 	ltgr	%r4,%r4
 	jz	.L__memset_exit\bits
 	cghi	%r4,\bytes
@@ -178,7 +184,7 @@ ENTRY(__memset\bits)
 	BR_EX	%r14
 .L__memset_mvc\bits:
 	mvc	\bytes(1,%r1),0(%r1)
-ENDPROC(__memset\bits)
+SYM_FUNC_END(__memset\bits)
 .endm
 
 __MEMSET 16,2,sth

@@ -27,14 +27,13 @@ void debug_user_asce(int exit)
 		     "kernel: %016llx user: %016llx\n",
 		     exit ? "exit" : "entry", cr1, cr7,
 		     S390_lowcore.kernel_asce, S390_lowcore.user_asce);
 
 }
 #endif /*CONFIG_DEBUG_ENTRY */
 
 static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
 					    unsigned long size, unsigned long key)
 {
-	unsigned long tmp1, tmp2;
+	unsigned long rem;
 	union oac spec = {
 		.oac2.key = key,
 		.oac2.as = PSW_BITS_AS_SECONDARY,
@@ -42,28 +41,30 @@ static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
 		.oac2.a = 1,
 	};
 
-	tmp1 = -4096UL;
 	asm volatile(
-		"   lr	  0,%[spec]\n"
-		"0: mvcos 0(%2),0(%1),%0\n"
-		"6: jz	  4f\n"
-		"1: algr  %0,%3\n"
-		"   slgr  %1,%3\n"
-		"   slgr  %2,%3\n"
-		"   j	  0b\n"
-		"2: la	  %4,4095(%1)\n"/* %4 = ptr + 4095 */
-		"   nr	  %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
-		"   slgr  %4,%1\n"
-		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh	  5f\n"
-		"3: mvcos 0(%2),0(%1),%4\n"
-		"7: slgr  %0,%4\n"
-		"   j	  5f\n"
-		"4: slgr  %0,%0\n"
-		"5:\n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
-		: "+a" (size), "+a" (from), "+a" (to), "+a" (tmp1), "=a" (tmp2)
-		: [spec] "d" (spec.val)
+		"	lr	0,%[spec]\n"
+		"0:	mvcos	0(%[to]),0(%[from]),%[size]\n"
+		"1:	jz	5f\n"
+		"	algr	%[size],%[val]\n"
+		"	slgr	%[from],%[val]\n"
+		"	slgr	%[to],%[val]\n"
+		"	j	0b\n"
+		"2:	la	%[rem],4095(%[from])\n"	/* rem = from + 4095 */
+		"	nr	%[rem],%[val]\n"	/* rem = (from + 4095) & -4096 */
+		"	slgr	%[rem],%[from]\n"
+		"	clgr	%[size],%[rem]\n"	/* copy crosses next page boundary? */
+		"	jnh	6f\n"
+		"3:	mvcos	0(%[to]),0(%[from]),%[rem]\n"
+		"4:	slgr	%[size],%[rem]\n"
+		"	j	6f\n"
+		"5:	slgr	%[size],%[size]\n"
+		"6:\n"
+		EX_TABLE(0b, 2b)
+		EX_TABLE(1b, 2b)
+		EX_TABLE(3b, 6b)
+		EX_TABLE(4b, 6b)
+		: [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
+		: [val] "a" (-4096UL), [spec] "d" (spec.val)
 		: "cc", "memory", "0");
 	return size;
 }
@@ -94,7 +95,7 @@ EXPORT_SYMBOL(_copy_from_user_key);
 static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
 					  unsigned long size, unsigned long key)
 {
-	unsigned long tmp1, tmp2;
+	unsigned long rem;
 	union oac spec = {
 		.oac1.key = key,
 		.oac1.as = PSW_BITS_AS_SECONDARY,
@@ -102,28 +103,30 @@ static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
 		.oac1.a = 1,
 	};
 
-	tmp1 = -4096UL;
 	asm volatile(
-		"   lr	  0,%[spec]\n"
-		"0: mvcos 0(%1),0(%2),%0\n"
-		"6: jz	  4f\n"
-		"1: algr  %0,%3\n"
-		"   slgr  %1,%3\n"
-		"   slgr  %2,%3\n"
-		"   j	  0b\n"
-		"2: la	  %4,4095(%1)\n"/* %4 = ptr + 4095 */
-		"   nr	  %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
-		"   slgr  %4,%1\n"
-		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh	  5f\n"
-		"3: mvcos 0(%1),0(%2),%4\n"
-		"7: slgr  %0,%4\n"
-		"   j	  5f\n"
-		"4: slgr  %0,%0\n"
-		"5:\n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
-		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
-		: [spec] "d" (spec.val)
+		"	lr	0,%[spec]\n"
+		"0:	mvcos	0(%[to]),0(%[from]),%[size]\n"
+		"1:	jz	5f\n"
+		"	algr	%[size],%[val]\n"
+		"	slgr	%[to],%[val]\n"
+		"	slgr	%[from],%[val]\n"
+		"	j	0b\n"
+		"2:	la	%[rem],4095(%[to])\n"	/* rem = to + 4095 */
+		"	nr	%[rem],%[val]\n"	/* rem = (to + 4095) & -4096 */
+		"	slgr	%[rem],%[to]\n"
+		"	clgr	%[size],%[rem]\n"	/* copy crosses next page boundary? */
+		"	jnh	6f\n"
+		"3:	mvcos	0(%[to]),0(%[from]),%[rem]\n"
+		"4:	slgr	%[size],%[rem]\n"
+		"	j	6f\n"
+		"5:	slgr	%[size],%[size]\n"
+		"6:\n"
+		EX_TABLE(0b, 2b)
+		EX_TABLE(1b, 2b)
+		EX_TABLE(3b, 6b)
+		EX_TABLE(4b, 6b)
+		: [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
+		: [val] "a" (-4096UL), [spec] "d" (spec.val)
 		: "cc", "memory", "0");
 	return size;
 }
@@ -147,33 +150,35 @@ EXPORT_SYMBOL(_copy_to_user_key);
 
 unsigned long __clear_user(void __user *to, unsigned long size)
 {
-	unsigned long tmp1, tmp2;
+	unsigned long rem;
 	union oac spec = {
 		.oac1.as = PSW_BITS_AS_SECONDARY,
 		.oac1.a = 1,
 	};
 
-	tmp1 = -4096UL;
 	asm volatile(
-		"   lr	  0,%[spec]\n"
-		"0: mvcos 0(%1),0(%4),%0\n"
-		"6: jz	  4f\n"
-		"1: algr  %0,%2\n"
-		"   slgr  %1,%2\n"
-		"   j	  0b\n"
-		"2: la	  %3,4095(%1)\n"/* %4 = to + 4095 */
-		"   nr	  %3,%2\n"	/* %4 = (to + 4095) & -4096 */
-		"   slgr  %3,%1\n"
-		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
-		"   jnh	  5f\n"
-		"3: mvcos 0(%1),0(%4),%3\n"
-		"7: slgr  %0,%3\n"
-		"   j	  5f\n"
-		"4: slgr  %0,%0\n"
-		"5:\n"
-		EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
-		: "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
-		: "a" (empty_zero_page), [spec] "d" (spec.val)
+		"	lr	0,%[spec]\n"
+		"0:	mvcos	0(%[to]),0(%[zeropg]),%[size]\n"
+		"1:	jz	5f\n"
+		"	algr	%[size],%[val]\n"
+		"	slgr	%[to],%[val]\n"
+		"	j	0b\n"
+		"2:	la	%[rem],4095(%[to])\n"	/* rem = to + 4095 */
+		"	nr	%[rem],%[val]\n"	/* rem = (to + 4095) & -4096 */
+		"	slgr	%[rem],%[to]\n"
+		"	clgr	%[size],%[rem]\n"	/* copy crosses next page boundary? */
+		"	jnh	6f\n"
+		"3:	mvcos	0(%[to]),0(%[zeropg]),%[rem]\n"
+		"4:	slgr	%[size],%[rem]\n"
+		"	j	6f\n"
+		"5:	slgr	%[size],%[size]\n"
+		"6:\n"
+		EX_TABLE(0b, 2b)
+		EX_TABLE(1b, 2b)
+		EX_TABLE(3b, 6b)
+		EX_TABLE(4b, 6b)
+		: [size] "+&a" (size), [to] "+&a" (to), [rem] "=&a" (rem)
+		: [val] "a" (-4096UL), [zeropg] "a" (empty_zero_page), [spec] "d" (spec.val)
 		: "cc", "memory", "0");
 	return size;
 }

@@ -10,6 +10,3 @@ obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE)	+= dump_pagetables.o
 obj-$(CONFIG_PGSTE)		+= gmap.o
-
-KASAN_SANITIZE_kasan_init.o	:= n
-obj-$(CONFIG_KASAN)		+= kasan_init.o

@@ -335,16 +335,6 @@ static struct ctl_table cmm_table[] = {
 	{ }
 };
 
-static struct ctl_table cmm_dir_table[] = {
-	{
-		.procname	= "vm",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= cmm_table,
-	},
-	{ }
-};
-
 #ifdef CONFIG_CMM_IUCV
 #define SMSG_PREFIX "CMM"
 static void cmm_smsg_target(const char *from, char *msg)
@@ -389,7 +379,7 @@ static int __init cmm_init(void)
 {
 	int rc = -ENOMEM;
 
-	cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
+	cmm_sysctl_header = register_sysctl("vm", cmm_table);
 	if (!cmm_sysctl_header)
 		goto out_sysctl;
 #ifdef CONFIG_CMM_IUCV

@@ -176,9 +176,8 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	__set_memory((unsigned long)_sinittext,
-		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
-		     SET_MEMORY_RW | SET_MEMORY_NX);
+	set_memory_rwnx((unsigned long)_sinittext,
+			(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 

@@ -1,301 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kasan.h>
-#include <linux/sched/task.h>
-#include <linux/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/kasan.h>
-#include <asm/mem_detect.h>
-#include <asm/processor.h>
-#include <asm/sclp.h>
-#include <asm/facility.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/uv.h>
-
-static unsigned long segment_pos __initdata;
-static unsigned long segment_low __initdata;
-static bool has_edat __initdata;
-static bool has_nx __initdata;
-
-#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
-
-static void __init kasan_early_panic(const char *reason)
-{
-	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
-	sclp_early_printk(reason);
-	disabled_wait();
-}
-
-static void * __init kasan_early_alloc_segment(void)
-{
-	segment_pos -= _SEGMENT_SIZE;
-
-	if (segment_pos < segment_low)
-		kasan_early_panic("out of memory during initialisation\n");
-
-	return __va(segment_pos);
-}
-
-static void * __init kasan_early_alloc_pages(unsigned int order)
-{
-	pgalloc_pos -= (PAGE_SIZE << order);
-
-	if (pgalloc_pos < pgalloc_low)
-		kasan_early_panic("out of memory during initialisation\n");
-
-	return __va(pgalloc_pos);
-}
-
-static void * __init kasan_early_crst_alloc(unsigned long val)
-{
-	unsigned long *table;
-
-	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
-	if (table)
-		crst_table_init(table, val);
-	return table;
-}
-
-static pte_t * __init kasan_early_pte_alloc(void)
-{
-	static void *pte_leftover;
-	pte_t *pte;
-
-	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
-
-	if (!pte_leftover) {
-		pte_leftover = kasan_early_alloc_pages(0);
-		pte = pte_leftover + _PAGE_TABLE_SIZE;
-	} else {
-		pte = pte_leftover;
-		pte_leftover = NULL;
-	}
-	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
-	return pte;
-}
-
-enum populate_mode {
-	POPULATE_MAP,
-	POPULATE_ZERO_SHADOW,
-	POPULATE_SHALLOW
-};
-
-static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
-{
-	return __pgprot(pgprot_val(pgprot) & ~bit);
-}
-
-static void __init kasan_early_pgtable_populate(unsigned long address,
-						unsigned long end,
-						enum populate_mode mode)
-{
-	pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
-	pgprot_t pgt_prot = PAGE_KERNEL;
-	pgprot_t sgt_prot = SEGMENT_KERNEL;
-	pgd_t *pg_dir;
-	p4d_t *p4_dir;
-	pud_t *pu_dir;
-	pmd_t *pm_dir;
-	pte_t *pt_dir;
-	pmd_t pmd;
-	pte_t pte;
-
-	if (!has_nx) {
-		pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
-		pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
-		sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
-	}
-
-	while (address < end) {
-		pg_dir = pgd_offset_k(address);
-		if (pgd_none(*pg_dir)) {
-			if (mode == POPULATE_ZERO_SHADOW &&
-			    IS_ALIGNED(address, PGDIR_SIZE) &&
-			    end - address >= PGDIR_SIZE) {
-				pgd_populate(&init_mm, pg_dir,
-					     kasan_early_shadow_p4d);
-				address = (address + PGDIR_SIZE) & PGDIR_MASK;
-				continue;
-			}
-			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
-			pgd_populate(&init_mm, pg_dir, p4_dir);
-		}
-
-		if (mode == POPULATE_SHALLOW) {
-			address = (address + P4D_SIZE) & P4D_MASK;
-			continue;
-		}
-
-		p4_dir = p4d_offset(pg_dir, address);
-		if (p4d_none(*p4_dir)) {
-			if (mode == POPULATE_ZERO_SHADOW &&
-			    IS_ALIGNED(address, P4D_SIZE) &&
-			    end - address >= P4D_SIZE) {
-				p4d_populate(&init_mm, p4_dir,
-					     kasan_early_shadow_pud);
-				address = (address + P4D_SIZE) & P4D_MASK;
-				continue;
-			}
-			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
-			p4d_populate(&init_mm, p4_dir, pu_dir);
-		}
-
-		pu_dir = pud_offset(p4_dir, address);
-		if (pud_none(*pu_dir)) {
-			if (mode == POPULATE_ZERO_SHADOW &&
-			    IS_ALIGNED(address, PUD_SIZE) &&
-			    end - address >= PUD_SIZE) {
-				pud_populate(&init_mm, pu_dir,
-					     kasan_early_shadow_pmd);
-				address = (address + PUD_SIZE) & PUD_MASK;
-				continue;
-			}
-			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
-			pud_populate(&init_mm, pu_dir, pm_dir);
-		}
-
-		pm_dir = pmd_offset(pu_dir, address);
-		if (pmd_none(*pm_dir)) {
-			if (IS_ALIGNED(address, PMD_SIZE) &&
-			    end - address >= PMD_SIZE) {
-				if (mode == POPULATE_ZERO_SHADOW) {
-					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
-					address = (address + PMD_SIZE) & PMD_MASK;
-					continue;
-				} else if (has_edat) {
-					void *page = kasan_early_alloc_segment();
-
-					memset(page, 0, _SEGMENT_SIZE);
-					pmd = __pmd(__pa(page));
-					pmd = set_pmd_bit(pmd, sgt_prot);
-					set_pmd(pm_dir, pmd);
-					address = (address + PMD_SIZE) & PMD_MASK;
-					continue;
-				}
-			}
-			pt_dir = kasan_early_pte_alloc();
-			pmd_populate(&init_mm, pm_dir, pt_dir);
-		} else if (pmd_large(*pm_dir)) {
-			address = (address + PMD_SIZE) & PMD_MASK;
-			continue;
-		}
-
-		pt_dir = pte_offset_kernel(pm_dir, address);
-		if (pte_none(*pt_dir)) {
-			void *page;
-
-			switch (mode) {
-			case POPULATE_MAP:
-				page = kasan_early_alloc_pages(0);
-				memset(page, 0, PAGE_SIZE);
-				pte = __pte(__pa(page));
-				pte = set_pte_bit(pte, pgt_prot);
-				set_pte(pt_dir, pte);
-				break;
-			case POPULATE_ZERO_SHADOW:
-				page = kasan_early_shadow_page;
-				pte = __pte(__pa(page));
-				pte = set_pte_bit(pte, pgt_prot_zero);
-				set_pte(pt_dir, pte);
-				break;
-			case POPULATE_SHALLOW:
-				/* should never happen */
-				break;
-			}
-		}
-		address += PAGE_SIZE;
-	}
-}
-
-static void __init kasan_early_detect_facilities(void)
-{
-	if (test_facility(8)) {
-		has_edat = true;
-		__ctl_set_bit(0, 23);
-	}
-	if (!noexec_disabled && test_facility(130)) {
-		has_nx = true;
-		__ctl_set_bit(0, 20);
-	}
-}
-
-void __init kasan_early_init(void)
-{
-	pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
-	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
-	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
-	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
-	unsigned long untracked_end = MODULES_VADDR;
-	unsigned long shadow_alloc_size;
-	unsigned long start, end;
-	int i;
-
-	kasan_early_detect_facilities();
-	if (!has_nx)
-		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
-
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
-
-	/* init kasan zero shadow */
-	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
-	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
-	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
-	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
-
-	if (has_edat) {
-		shadow_alloc_size = get_mem_detect_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
-		segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
-		segment_low = segment_pos - shadow_alloc_size;
-		segment_low = round_down(segment_low, _SEGMENT_SIZE);
-		pgalloc_pos = segment_low;
-	}
-	/*
-	 * Current memory layout:
-	 * +- 0 -------------+	   +- shadow start -+
-	 * |1:1 ident mapping|	  /|1/8 of ident map|
-	 * |		     |	 / |		    |
-	 * +-end of ident map+	/  +----------------+
-	 * | ... gap ...     | /   | kasan	    |
-	 * |		     |/	   |  zero page	    |
-	 * +- vmalloc area  -+	   |   mapping	    |
-	 * | vmalloc_size    |	   | (untracked)    |
-	 * +- modules vaddr -+	   +----------------+
-	 * | 2Gb	     |	   |    unmapped    | allocated per module
-	 * +- shadow start  -+	   +----------------+
-	 * | 1/8 addr space  |	   | zero pg mapping| (untracked)
-	 * +- shadow end ----+-----+- shadow end ---+
-	 *
-	 * Current memory layout (KASAN_VMALLOC):
-	 * +- 0 -------------+	   +- shadow start -+
-	 * |1:1 ident mapping|	  /|1/8 of ident map|
-	 * |		     |	 / |		    |
-	 * +-end of ident map+	/  +----------------+
-	 * | ... gap ...     | /   | kasan zero page| (untracked)
-	 * |		     |/	   | mapping	    |
-	 * +- vmalloc area  -+	   +----------------+
-	 * | vmalloc_size    |	   |shallow populate|
-	 * +- modules vaddr -+	   +----------------+
-	 * | 2Gb	     |	   |shallow populate|
-	 * +- shadow start  -+	   +----------------+
-	 * | 1/8 addr space  |	   | zero pg mapping| (untracked)
-	 * +- shadow end ----+-----+- shadow end ---+
-	 */
-	/* populate kasan shadow (for identity mapping and zero page mapping) */
-	for_each_mem_detect_usable_block(i, &start, &end)
-		kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
-	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
-		untracked_end = VMALLOC_START;
-		/* shallowly populate kasan shadow for vmalloc and modules */
-		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
-					     POPULATE_SHALLOW);
-	}
-	/* populate kasan shadow for untracked memory */
-	kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
-				     POPULATE_ZERO_SHADOW);
-	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
-				     POPULATE_ZERO_SHADOW);
-	/* enable kasan */
-	init_task.kasan_depth = 0;
-	sclp_early_printk("KernelAddressSanitizer initialized\n");
-}

@@ -4,6 +4,7 @@
  * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
  */
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 #include <asm/facility.h>
@@ -41,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
 }
 
 #ifdef CONFIG_PROC_FS
-atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
 
 void arch_report_meminfo(struct seq_file *m)
 {
@@ -101,6 +102,14 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
 		else if (flags & SET_MEMORY_X)
 			new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+		if (flags & SET_MEMORY_INV) {
+			new = set_pte_bit(new, __pgprot(_PAGE_INVALID));
+		} else if (flags & SET_MEMORY_DEF) {
+			new = __pte(pte_val(new) & PAGE_MASK);
+			new = set_pte_bit(new, PAGE_KERNEL);
+			if (!MACHINE_HAS_NX)
+				new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+		}
 		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
 		ptep++;
 		addr += PAGE_SIZE;
@@ -151,6 +160,14 @@ static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
 		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
 	else if (flags & SET_MEMORY_X)
 		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+	if (flags & SET_MEMORY_INV) {
+		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
+	} else if (flags & SET_MEMORY_DEF) {
+		new = __pmd(pmd_val(new) & PMD_MASK);
+		new = set_pmd_bit(new, SEGMENT_KERNEL);
+		if (!MACHINE_HAS_NX)
+			new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+	}
 	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
 }
 
@@ -232,6 +249,14 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr,
 		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
 	else if (flags & SET_MEMORY_X)
 		new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+	if (flags & SET_MEMORY_INV) {
+		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_INVALID));
+	} else if (flags & SET_MEMORY_DEF) {
+		new = __pud(pud_val(new) & PUD_MASK);
+		new = set_pud_bit(new, REGION3_KERNEL);
+		if (!MACHINE_HAS_NX)
+			new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+	}
 	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
 }
 
@@ -298,11 +323,6 @@ static int change_page_attr(unsigned long addr, unsigned long end,
 	int rc = -EINVAL;
 	pgd_t *pgdp;
 
-	if (addr == end)
-		return 0;
-	if (end >= MODULES_END)
-		return -EINVAL;
-	mutex_lock(&cpa_mutex);
 	pgdp = pgd_offset_k(addr);
 	do {
 		if (pgd_none(*pgdp))
@@ -313,18 +333,76 @@ static int change_page_attr(unsigned long addr, unsigned long end,
 			break;
 		cond_resched();
 	} while (pgdp++, addr = next, addr < end && !rc);
-	mutex_unlock(&cpa_mutex);
 	return rc;
 }
 
+static int change_page_attr_alias(unsigned long addr, unsigned long end,
+				  unsigned long flags)
+{
+	unsigned long alias, offset, va_start, va_end;
+	struct vm_struct *area;
+	int rc = 0;
+
+	/*
+	 * Changes to read-only permissions on kernel VA mappings are also
+	 * applied to the kernel direct mapping. Execute permissions are
+	 * intentionally not transferred to keep all allocated pages within
+	 * the direct mapping non-executable.
+	 */
+	flags &= SET_MEMORY_RO | SET_MEMORY_RW;
+	if (!flags)
+		return 0;
+	area = NULL;
+	while (addr < end) {
+		if (!area)
+			area = find_vm_area((void *)addr);
+		if (!area || !(area->flags & VM_ALLOC))
+			return 0;
+		va_start = (unsigned long)area->addr;
+		va_end = va_start + area->nr_pages * PAGE_SIZE;
+		offset = (addr - va_start) >> PAGE_SHIFT;
+		alias = (unsigned long)page_address(area->pages[offset]);
+		rc = change_page_attr(alias, alias + PAGE_SIZE, flags);
+		if (rc)
+			break;
+		addr += PAGE_SIZE;
+		if (addr >= va_end)
+			area = NULL;
+	}
+	return rc;
+}
+
 int __set_memory(unsigned long addr, int numpages, unsigned long flags)
 {
+	unsigned long end;
+	int rc;
+
 	if (!MACHINE_HAS_NX)
 		flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
 	if (!flags)
 		return 0;
+	if (!numpages)
+		return 0;
 	addr &= PAGE_MASK;
-	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
+	end = addr + numpages * PAGE_SIZE;
+	mutex_lock(&cpa_mutex);
+	rc = change_page_attr(addr, end, flags);
+	if (rc)
+		goto out;
+	rc = change_page_attr_alias(addr, end, flags);
+out:
+	mutex_unlock(&cpa_mutex);
+	return rc;
 }
 
+int set_direct_map_invalid_noflush(struct page *page)
+{
+	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
+}
+
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)

@@ -33,19 +33,9 @@ static struct ctl_table page_table_sysctl[] = {
 	{ }
 };
 
-static struct ctl_table page_table_sysctl_dir[] = {
-	{
-		.procname	= "vm",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= page_table_sysctl,
-	},
-	{ }
-};
-
 static int __init page_table_register_sysctl(void)
 {
-	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
+	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
 }
 __initcall(page_table_register_sysctl);
 
@@ -143,13 +133,7 @@ err_p4d:
 
 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
 {
-	unsigned int old, new;
-
-	do {
-		old = atomic_read(v);
-		new = old ^ bits;
-	} while (atomic_cmpxchg(v, old, new) != old);
-	return new;
+	return atomic_fetch_xor(bits, v) ^ bits;
 }
 
 #ifdef CONFIG_PGSTE

@@ -5,6 +5,7 @@
 
 #include <linux/memory_hotplug.h>
 #include <linux/memblock.h>
+#include <linux/kasan.h>
 #include <linux/pfn.h>
 #include <linux/mm.h>
 #include <linux/init.h>
@@ -664,6 +665,9 @@ static void __init memblock_region_swap(void *a, void *b, int size)
 	swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
 }
 
+#ifdef CONFIG_KASAN
+#define __sha(x)	((unsigned long)kasan_mem_to_shadow((void *)x))
+#endif
 /*
  * map whole physical memory to virtual memory (identity mapping)
  * we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -728,23 +732,24 @@ void __init vmem_map_init(void)
 	     memblock_region_cmp, memblock_region_swap);
 	__for_each_mem_range(i, &memblock.memory, &memory_rwx,
 			     NUMA_NO_NODE, MEMBLOCK_NONE, &base, &end, NULL) {
-		__set_memory((unsigned long)__va(base),
-			     (end - base) >> PAGE_SHIFT,
-			     SET_MEMORY_RW | SET_MEMORY_NX);
+		set_memory_rwnx((unsigned long)__va(base),
+				(end - base) >> PAGE_SHIFT);
 	}
 
-	__set_memory((unsigned long)_stext,
-		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
-		     SET_MEMORY_RO | SET_MEMORY_X);
-	__set_memory((unsigned long)_etext,
-		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
-		     SET_MEMORY_RO);
-	__set_memory((unsigned long)_sinittext,
-		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
-		     SET_MEMORY_RO | SET_MEMORY_X);
-	__set_memory(__stext_amode31,
-		     (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
-		     SET_MEMORY_RO | SET_MEMORY_X);
+#ifdef CONFIG_KASAN
+	for_each_mem_range(i, &base, &end) {
+		set_memory_rwnx(__sha(base),
+				(__sha(end) - __sha(base)) >> PAGE_SHIFT);
+	}
+#endif
+	set_memory_rox((unsigned long)_stext,
+		       (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
+	set_memory_ro((unsigned long)_etext,
+		      (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT);
+	set_memory_rox((unsigned long)_sinittext,
+		       (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
+	set_memory_rox(__stext_amode31,
+		       (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT);
 
 	/* lowcore must be executable for LPSWE */
 	if (static_key_enabled(&cpu_has_bear))

@@ -874,32 +874,15 @@ bool zpci_is_device_configured(struct zpci_dev *zdev)
  * @fh: The general function handle supplied by the platform
  *
  * Given a device in the configuration state Configured, enables, scans and
- * adds it to the common code PCI subsystem if possible. If the PCI device is
- * parked because we can not yet create a PCI bus because we have not seen
- * function 0, it is ignored but will be scanned once function 0 appears.
- * If any failure occurs, the zpci_dev is left disabled.
+ * adds it to the common code PCI subsystem if possible. If any failure occurs,
+ * the zpci_dev is left disabled.
  *
  * Return: 0 on success, or an error code otherwise
  */
 int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
 {
-	int rc;
-
 	zpci_update_fh(zdev, fh);
-	/* the PCI function will be scanned once function 0 appears */
-	if (!zdev->zbus->bus)
-		return 0;
-
-	/* For function 0 on a multi-function bus scan whole bus as we might
-	 * have to pick up existing functions waiting for it to allow creating
-	 * the PCI bus
-	 */
-	if (zdev->devfn == 0 && zdev->zbus->multifunction)
-		rc = zpci_bus_scan_bus(zdev->zbus);
-	else
-		rc = zpci_bus_scan_device(zdev);
-
-	return rc;
+	return zpci_bus_scan_device(zdev);
 }
 
 /**

@@ -85,9 +85,8 @@ int zpci_bus_scan_device(struct zpci_dev *zdev)
 	if (!pdev)
 		return -ENODEV;
 
-	pci_bus_add_device(pdev);
 	pci_lock_rescan_remove();
-	pci_bus_add_devices(zdev->zbus->bus);
+	pci_bus_add_device(pdev);
 	pci_unlock_rescan_remove();
 
 	return 0;
@@ -130,11 +129,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error)
  * @zbus: the zbus to be scanned
  *
  * Enables and scans all PCI functions on the bus making them available to the
- * common PCI code. If there is no function 0 on the zbus nothing is scanned. If
- * a function does not have a slot yet because it was added to the zbus before
- * function 0 the slot is created. If a PCI function fails to be initialized
- * an error will be returned but attempts will still be made for all other
- * functions on the bus.
+ * common PCI code. If a PCI function fails to be initialized an error will be
+ * returned but attempts will still be made for all other functions on the bus.
  *
  * Return: 0 on success, an error value otherwise
  */
@@ -211,7 +207,6 @@ static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, s
 	}
 
 	zbus->bus = bus;
-	pci_bus_add_devices(bus);
 
 	return 0;
 }

@@ -76,9 +76,9 @@
 	diag	%r0,%r1,0x308
 .endm
 
-	.text
-	.align	PAGE_SIZE
-ENTRY(purgatory_start)
+	.text
+	.balign	PAGE_SIZE
+SYM_CODE_START(purgatory_start)
 	/* The purgatory might be called after a diag308 so better set
 	 * architecture and addressing mode.
 	 */
@@ -245,45 +245,21 @@ ENTRY(purgatory_start)
 
 	/* start crash kernel */
 	START_NEXT_KERNEL .base_dst 1
+SYM_CODE_END(purgatory_start)
 
-load_psw_mask:
-	.long	0x00080000,0x80000000
-
-	.align	8
-disabled_wait_psw:
-	.quad	0x0002000180000000
-	.quad	0x0000000000000000 + .do_checksum_verification
-
-gprregs:
-	.rept	10
-	.quad	0
-	.endr
-
-/* Macro to define a global variable with name and size (in bytes) to be
- * shared with C code.
- *
- * Add the .size and .type attribute to satisfy checks on the Elf_Sym during
- * purgatory load.
- */
-.macro GLOBAL_VARIABLE name,size
-\name:
-	.global	\name
-	.size	\name,\size
-	.type	\name,object
-	.skip	\size,0
-.endm
-
-GLOBAL_VARIABLE purgatory_sha256_digest,32
-GLOBAL_VARIABLE purgatory_sha_regions,16*__KEXEC_SHA_REGION_SIZE
-GLOBAL_VARIABLE kernel_entry,8
-GLOBAL_VARIABLE kernel_type,8
-GLOBAL_VARIABLE crash_start,8
-GLOBAL_VARIABLE crash_size,8
-
-	.align	PAGE_SIZE
-stack:
+SYM_DATA_LOCAL(load_psw_mask, .long 0x00080000,0x80000000)
+	.balign	8
+SYM_DATA_LOCAL(disabled_wait_psw, .quad 0x0002000180000000,.do_checksum_verification)
+SYM_DATA_LOCAL(gprregs, .fill 10,8,0)
+SYM_DATA(purgatory_sha256_digest, .skip 32)
+SYM_DATA(purgatory_sha_regions, .skip 16*__KEXEC_SHA_REGION_SIZE)
+SYM_DATA(kernel_entry, .skip 8)
+SYM_DATA(kernel_type, .skip 8)
+SYM_DATA(crash_start, .skip 8)
+SYM_DATA(crash_size, .skip 8)
+	.balign	PAGE_SIZE
+SYM_DATA_START_LOCAL(stack)
 	/* The buffer to move this code must be as big as the code. */
 	.skip	stack-purgatory_start
-	.align	PAGE_SIZE
-purgatory_end:
+	.balign	PAGE_SIZE
+SYM_DATA_END_LABEL(stack, SYM_L_LOCAL, purgatory_end)

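The .size/.type annotations that GLOBAL_VARIABLE used to emit by hand (and that SYM_DATA now provides) matter because kexec_file patches these purgatory objects by name through the ELF symbol table at load time. A hedged sketch of that step, assuming the generic kexec_purgatory_get_set_symbol() helper from kernel/kexec_file.c; the wrapper name is invented for illustration:

	/* illustrative only: write an 8-byte value over purgatory's kernel_entry */
	static int patch_kernel_entry(struct kimage *image, unsigned long entry)
	{
		return kexec_purgatory_get_set_symbol(image, "kernel_entry",
						      &entry, sizeof(entry),
						      false /* set, not get */);
	}
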
@@ -1,14 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
 
 	.section .rodata, "a"
 
-	.align	8
-kexec_purgatory:
-	.globl	kexec_purgatory
+	.balign	8
+SYM_DATA_START(kexec_purgatory)
 	.incbin	"arch/s390/purgatory/purgatory.ro"
-.Lkexec_purgatroy_end:
+SYM_DATA_END_LABEL(kexec_purgatory, SYM_L_LOCAL, kexec_purgatory_end)
 
-	.align	8
-kexec_purgatory_size:
-	.globl	kexec_purgatory_size
-	.quad	.Lkexec_purgatroy_end - kexec_purgatory
+	.balign	8
+SYM_DATA(kexec_purgatory_size, .quad kexec_purgatory_end-kexec_purgatory)

@@ -204,7 +204,7 @@ struct read_storage_sccb {
 	u16	assigned;
 	u16	standby;
 	u16	:16;
-	u32	entries[0];
+	u32	entries[];
 } __packed;
 
 static inline void sclp_fill_core_info(struct sclp_core_info *info,

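Flexible array members like entries[] (C99) replace the zero-length GNU extension so the compiler and the fortify/bounds-checking machinery can reason about the trailing array. A hedged sketch of how such a trailing array is typically sized, assuming the struct_size() helper from <linux/overflow.h>; the struct and function names are invented for illustration:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct example_sccb {
		u16	assigned;
		u16	standby;
		u32	entries[];	/* flexible array member */
	};

	static struct example_sccb *alloc_example_sccb(unsigned int n)
	{
		struct example_sccb *sccb;

		/* struct_size() = sizeof(*sccb) + n * sizeof(sccb->entries[0]),
		 * with integer-overflow checking */
		sccb = kzalloc(struct_size(sccb, entries, n), GFP_KERNEL);
		return sccb;
	}
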
@@ -241,7 +241,7 @@ struct attach_storage_sccb {
 	u16	:16;
 	u16	assigned;
 	u32	:32;
-	u32	entries[0];
+	u32	entries[];
 } __packed;
 
 static int sclp_attach_storage(u8 id)

@@ -10,7 +10,7 @@
 #include <asm/ebcdic.h>
 #include <asm/irq.h>
 #include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/facility.h>
 #include "sclp.h"
 #include "sclp_rw.h"
@@ -336,7 +336,7 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
 
 #define SCLP_STORAGE_INFO_FACILITY	0x0000400000000000UL
 
-void __weak __init add_mem_detect_block(u64 start, u64 end) {}
+void __weak __init add_physmem_online_range(u64 start, u64 end) {}
 int __init sclp_early_read_storage_info(void)
 {
 	struct read_storage_sccb *sccb = (struct read_storage_sccb *)sclp_early_sccb;
@@ -369,7 +369,7 @@ int __init sclp_early_read_storage_info(void)
 			if (!sccb->entries[sn])
 				continue;
 			rn = sccb->entries[sn] >> 16;
-			add_mem_detect_block((rn - 1) * rzm, rn * rzm);
+			add_physmem_online_range((rn - 1) * rzm, rn * rzm);
 		}
 		break;
 	case 0x0310:
@@ -382,6 +382,6 @@ int __init sclp_early_read_storage_info(void)
 
 	return 0;
 fail:
-	mem_detect.count = 0;
+	physmem_info.range_count = 0;
 	return -EIO;
 }

@@ -1171,7 +1171,7 @@ int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid)
 			u8 cssid;
 			u8 iid;
 			u32 : 16;
-		} list[0];
+		} list[];
 	} *sdcal_area;
 	int ret;
 

@@ -120,7 +120,7 @@ struct chsc_scpd {
 	u32 zeroes1;
 	struct chsc_header response;
 	u32:32;
-	u8 data[0];
+	u8 data[];
 } __packed __aligned(PAGE_SIZE);
 
 struct chsc_sda_area {

@@ -122,7 +122,13 @@ static struct hrtimer ap_poll_timer;
  * In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
  * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.
  */
-static unsigned long long poll_timeout = 250000;
+static unsigned long poll_high_timeout = 250000UL;
+
+/*
+ * Some state machine states only require a low frequency polling.
+ * We use 25 Hz frequency for these.
+ */
+static unsigned long poll_low_timeout = 40000000UL;
 
 /* Maximum domain id, if not given via qci */
 static int ap_max_domain_id = 15;
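Both constants follow directly from period = NSEC_PER_SEC / frequency: 10^9 / 4000 Hz = 250000 ns for the high-frequency poll and 10^9 / 25 Hz = 40000000 ns for the low-frequency one. A tiny runnable sketch of the conversion (plain C, not kernel code):

	#include <assert.h>

	#define NSEC_PER_SEC 1000000000UL

	/* polling period in nanoseconds for a given frequency in Hz */
	static unsigned long poll_period_ns(unsigned long hz)
	{
		return NSEC_PER_SEC / hz;
	}

	int main(void)
	{
		assert(poll_period_ns(4000) == 250000UL);	/* high freq */
		assert(poll_period_ns(25) == 40000000UL);	/* low freq */
		return 0;
	}
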
@@ -200,6 +206,18 @@ static inline int ap_qact_available(void)
 	return 0;
 }
 
+/*
+ * ap_sb_available(): Test if the AP secure binding facility is available.
+ *
+ * Returns 1 if secure binding facility is available.
+ */
+int ap_sb_available(void)
+{
+	if (ap_qci_info)
+		return ap_qci_info->apsb;
+	return 0;
+}
+
 /*
  * ap_fetch_qci_info(): Fetch cryptographic config info
  *
@@ -248,13 +266,13 @@ static void __init ap_init_qci_info(void)
 	AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
 
 	if (ap_qci_info->apxa) {
-		if (ap_qci_info->Na) {
-			ap_max_adapter_id = ap_qci_info->Na;
+		if (ap_qci_info->na) {
+			ap_max_adapter_id = ap_qci_info->na;
 			AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
 				    __func__, ap_max_adapter_id);
 		}
-		if (ap_qci_info->Nd) {
-			ap_max_domain_id = ap_qci_info->Nd;
+		if (ap_qci_info->nd) {
+			ap_max_domain_id = ap_qci_info->nd;
 			AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
 				    __func__, ap_max_domain_id);
 		}
@@ -324,35 +342,32 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain);
 
 /*
  * ap_queue_info(): Check and get AP queue info.
- * Returns true if TAPQ succeeded and the info is filled or
- * false otherwise.
+ * Returns: 1 if APQN exists and info is filled,
+ *	    0 if APQN seems to exit but there is no info
+ *	      available (eg. caused by an asynch pending error)
+ *	   -1 invalid APQN, TAPQ error or AP queue status which
+ *	      indicates there is no APQN.
  */
-static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
-			  int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
+static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
+			 int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
 {
 	struct ap_queue_status status;
-	union {
-		unsigned long value;
-		struct {
-			unsigned int fac   : 32;	/* facility bits */
-			unsigned int at	   : 8;		/* ap type */
-			unsigned int _res1 : 8;
-			unsigned int _res2 : 4;
-			unsigned int ml	   : 4;		/* apxl ml */
-			unsigned int _res3 : 4;
-			unsigned int qd	   : 4;		/* queue depth */
-		} tapq_gr2;
-	} tapq_info;
+	struct ap_tapq_gr2 tapq_info;
 
 	tapq_info.value = 0;
 
 	/* make sure we don't run into a specifiation exception */
 	if (AP_QID_CARD(qid) > ap_max_adapter_id ||
 	    AP_QID_QUEUE(qid) > ap_max_domain_id)
-		return false;
+		return -1;
 
 	/* call TAPQ on this APQN */
-	status = ap_test_queue(qid, ap_apft_available(), &tapq_info.value);
+	status = ap_test_queue(qid, ap_apft_available(), &tapq_info);
+
+	/* handle pending async error with return 'no info available' */
+	if (status.async)
+		return 0;
 
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
@@ -365,11 +380,11 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
 		 * there is at least one of the mode bits set.
 		 */
 		if (WARN_ON_ONCE(!tapq_info.value))
-			return false;
-		*q_type = tapq_info.tapq_gr2.at;
-		*q_fac = tapq_info.tapq_gr2.fac;
-		*q_depth = tapq_info.tapq_gr2.qd;
-		*q_ml = tapq_info.tapq_gr2.ml;
+			return 0;
+		*q_type = tapq_info.at;
+		*q_fac = tapq_info.fac;
+		*q_depth = tapq_info.qd;
+		*q_ml = tapq_info.ml;
 		*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
 		*q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
 		switch (*q_type) {
@@ -389,12 +404,12 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
 		default:
 			break;
 		}
-		return true;
+		return 1;
 	default:
 		/*
 		 * A response code which indicates, there is no info available.
 		 */
-		return false;
+		return -1;
 	}
 }
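The move from bool to a tri-state int lets callers tell "APQN is gone" apart from "APQN exists but has no info right now". A runnable toy model of the contract (the query function is a stand-in; the case labels mirror the ap_scan_domains() hunk further down):

	#include <stdio.h>

	/* stand-in for ap_queue_info(): returns -1, 0 or 1 */
	static int query_queue(int qid)
	{
		return qid % 3 - 1;
	}

	int main(void)
	{
		for (int qid = 0; qid < 3; qid++) {
			switch (query_queue(qid)) {
			case -1:	/* invalid APQN: drop any stale device */
				printf("qid %d: remove device\n", qid);
				break;
			case 0:		/* async error pending: keep, retry later */
				printf("qid %d: no info yet\n", qid);
				break;
			default:	/* info valid: create/update the device */
				printf("qid %d: update device\n", qid);
			}
		}
		return 0;
	}
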
@@ -412,10 +427,13 @@ void ap_wait(enum ap_sm_wait wait)
 			break;
 		}
 		fallthrough;
-	case AP_SM_WAIT_TIMEOUT:
+	case AP_SM_WAIT_LOW_TIMEOUT:
+	case AP_SM_WAIT_HIGH_TIMEOUT:
 		spin_lock_bh(&ap_poll_timer_lock);
 		if (!hrtimer_is_queued(&ap_poll_timer)) {
-			hr_time = poll_timeout;
+			hr_time =
+				wait == AP_SM_WAIT_LOW_TIMEOUT ?
+				poll_low_timeout : poll_high_timeout;
 			hrtimer_forward_now(&ap_poll_timer, hr_time);
 			hrtimer_restart(&ap_poll_timer);
 		}
@@ -1168,7 +1186,7 @@ EXPORT_SYMBOL(ap_parse_mask_str);
 
 static ssize_t ap_domain_show(const struct bus_type *bus, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+	return sysfs_emit(buf, "%d\n", ap_domain_index);
 }
 
 static ssize_t ap_domain_store(const struct bus_type *bus,
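sysfs_emit() knows the sysfs contract (a page-aligned PAGE_SIZE buffer), so every converted call site sheds the PAGE_SIZE bookkeeping and gains a WARN if misused. A hedged sketch of the resulting show-callback pattern; the attribute here is hypothetical:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		/* one value, one line - no buffer-length arithmetic */
		return sysfs_emit(buf, "%d\n", 42);
	}
	static DEVICE_ATTR_RO(example);
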
@@ -1196,14 +1214,13 @@ static BUS_ATTR_RW(ap_domain);
 static ssize_t ap_control_domain_mask_show(const struct bus_type *bus, char *buf)
 {
 	if (!ap_qci_info)	/* QCI not supported */
-		return scnprintf(buf, PAGE_SIZE, "not supported\n");
+		return sysfs_emit(buf, "not supported\n");
 
-	return scnprintf(buf, PAGE_SIZE,
-			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			 ap_qci_info->adm[0], ap_qci_info->adm[1],
-			 ap_qci_info->adm[2], ap_qci_info->adm[3],
-			 ap_qci_info->adm[4], ap_qci_info->adm[5],
-			 ap_qci_info->adm[6], ap_qci_info->adm[7]);
+	return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			  ap_qci_info->adm[0], ap_qci_info->adm[1],
+			  ap_qci_info->adm[2], ap_qci_info->adm[3],
+			  ap_qci_info->adm[4], ap_qci_info->adm[5],
+			  ap_qci_info->adm[6], ap_qci_info->adm[7]);
 }
 
 static BUS_ATTR_RO(ap_control_domain_mask);
@@ -1211,14 +1228,13 @@ static BUS_ATTR_RO(ap_control_domain_mask);
 static ssize_t ap_usage_domain_mask_show(const struct bus_type *bus, char *buf)
 {
 	if (!ap_qci_info)	/* QCI not supported */
-		return scnprintf(buf, PAGE_SIZE, "not supported\n");
+		return sysfs_emit(buf, "not supported\n");
 
-	return scnprintf(buf, PAGE_SIZE,
-			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			 ap_qci_info->aqm[0], ap_qci_info->aqm[1],
-			 ap_qci_info->aqm[2], ap_qci_info->aqm[3],
-			 ap_qci_info->aqm[4], ap_qci_info->aqm[5],
-			 ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
+	return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			  ap_qci_info->aqm[0], ap_qci_info->aqm[1],
+			  ap_qci_info->aqm[2], ap_qci_info->aqm[3],
+			  ap_qci_info->aqm[4], ap_qci_info->aqm[5],
+			  ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
 }
 
 static BUS_ATTR_RO(ap_usage_domain_mask);
@@ -1226,29 +1242,27 @@ static BUS_ATTR_RO(ap_usage_domain_mask);
 static ssize_t ap_adapter_mask_show(const struct bus_type *bus, char *buf)
 {
 	if (!ap_qci_info)	/* QCI not supported */
-		return scnprintf(buf, PAGE_SIZE, "not supported\n");
+		return sysfs_emit(buf, "not supported\n");
 
-	return scnprintf(buf, PAGE_SIZE,
-			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			 ap_qci_info->apm[0], ap_qci_info->apm[1],
-			 ap_qci_info->apm[2], ap_qci_info->apm[3],
-			 ap_qci_info->apm[4], ap_qci_info->apm[5],
-			 ap_qci_info->apm[6], ap_qci_info->apm[7]);
+	return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			  ap_qci_info->apm[0], ap_qci_info->apm[1],
+			  ap_qci_info->apm[2], ap_qci_info->apm[3],
+			  ap_qci_info->apm[4], ap_qci_info->apm[5],
+			  ap_qci_info->apm[6], ap_qci_info->apm[7]);
 }
 
 static BUS_ATTR_RO(ap_adapter_mask);
 
 static ssize_t ap_interrupts_show(const struct bus_type *bus, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n",
-			 ap_irq_flag ? 1 : 0);
+	return sysfs_emit(buf, "%d\n", ap_irq_flag ? 1 : 0);
 }
 
 static BUS_ATTR_RO(ap_interrupts);
 
 static ssize_t config_time_show(const struct bus_type *bus, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+	return sysfs_emit(buf, "%d\n", ap_config_time);
 }
 
 static ssize_t config_time_store(const struct bus_type *bus,
@@ -1267,17 +1281,20 @@ static BUS_ATTR_RW(config_time);
 
 static ssize_t poll_thread_show(const struct bus_type *bus, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+	return sysfs_emit(buf, "%d\n", ap_poll_kthread ? 1 : 0);
 }
 
 static ssize_t poll_thread_store(const struct bus_type *bus,
 				 const char *buf, size_t count)
 {
-	int flag, rc;
+	bool value;
+	int rc;
 
-	if (sscanf(buf, "%d\n", &flag) != 1)
-		return -EINVAL;
-	if (flag) {
+	rc = kstrtobool(buf, &value);
+	if (rc)
+		return rc;
+
+	if (value) {
 		rc = ap_poll_thread_start();
 		if (rc)
 			count = rc;
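kstrtobool() accepts the usual sysfs spellings ("1"/"0", "y"/"n", "on"/"off", ...) and rejects garbage, which is both stricter and shorter than the old sscanf() round trip. A runnable user-space stand-in for the subset of parsing it does (the kernel helper lives in <linux/kstrtox.h>):

	#include <stdbool.h>
	#include <stdio.h>

	/* minimal stand-in for kstrtobool(): first char decides, 'o' looks ahead */
	static int parse_bool(const char *s, bool *res)
	{
		switch (s[0]) {
		case '1': case 'y': case 'Y': case 't': case 'T':
			*res = true;
			return 0;
		case '0': case 'n': case 'N': case 'f': case 'F':
			*res = false;
			return 0;
		case 'o': case 'O':
			if (s[1] == 'n' || s[1] == 'N') { *res = true; return 0; }
			if (s[1] == 'f' || s[1] == 'F') { *res = false; return 0; }
			return -1;
		default:
			return -1;	/* kernel returns -EINVAL */
		}
	}

	int main(void)
	{
		bool v;

		printf("%d\n", parse_bool("1\n", &v) == 0 && v);	/* 1 */
		printf("%d\n", parse_bool("off", &v) == 0 && !v);	/* 1 */
		printf("%d\n", parse_bool("maybe", &v) == 0);		/* 0 */
		return 0;
	}
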
@@ -1291,21 +1308,25 @@ static BUS_ATTR_RW(poll_thread);
 
 static ssize_t poll_timeout_show(const struct bus_type *bus, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+	return sysfs_emit(buf, "%lu\n", poll_high_timeout);
 }
 
 static ssize_t poll_timeout_store(const struct bus_type *bus, const char *buf,
 				  size_t count)
 {
-	unsigned long long time;
+	unsigned long value;
 	ktime_t hr_time;
+	int rc;
+
+	rc = kstrtoul(buf, 0, &value);
+	if (rc)
+		return rc;
 
 	/* 120 seconds = maximum poll interval */
-	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
-	    time > 120000000000ULL)
+	if (value > 120000000000UL)
 		return -EINVAL;
-	poll_timeout = time;
-	hr_time = poll_timeout;
+	poll_high_timeout = value;
+	hr_time = poll_high_timeout;
 
 	spin_lock_bh(&ap_poll_timer_lock);
 	hrtimer_cancel(&ap_poll_timer);
@@ -1320,14 +1341,14 @@ static BUS_ATTR_RW(poll_timeout);
 
 static ssize_t ap_max_domain_id_show(const struct bus_type *bus, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
+	return sysfs_emit(buf, "%d\n", ap_max_domain_id);
 }
 
 static BUS_ATTR_RO(ap_max_domain_id);
 
 static ssize_t ap_max_adapter_id_show(const struct bus_type *bus, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
+	return sysfs_emit(buf, "%d\n", ap_max_adapter_id);
 }
 
 static BUS_ATTR_RO(ap_max_adapter_id);
@@ -1338,10 +1359,9 @@ static ssize_t apmask_show(const struct bus_type *bus, char *buf)
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
-	rc = scnprintf(buf, PAGE_SIZE,
-		       "0x%016lx%016lx%016lx%016lx\n",
-		       ap_perms.apm[0], ap_perms.apm[1],
-		       ap_perms.apm[2], ap_perms.apm[3]);
+	rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
+			ap_perms.apm[0], ap_perms.apm[1],
+			ap_perms.apm[2], ap_perms.apm[3]);
 	mutex_unlock(&ap_perms_mutex);
 
 	return rc;
@@ -1431,10 +1451,9 @@ static ssize_t aqmask_show(const struct bus_type *bus, char *buf)
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
-	rc = scnprintf(buf, PAGE_SIZE,
-		       "0x%016lx%016lx%016lx%016lx\n",
-		       ap_perms.aqm[0], ap_perms.aqm[1],
-		       ap_perms.aqm[2], ap_perms.aqm[3]);
+	rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
+			ap_perms.aqm[0], ap_perms.aqm[1],
+			ap_perms.aqm[2], ap_perms.aqm[3]);
 	mutex_unlock(&ap_perms_mutex);
 
 	return rc;
@@ -1520,8 +1539,7 @@ static BUS_ATTR_RW(aqmask);
 
 static ssize_t scans_show(const struct bus_type *bus, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%llu\n",
-			 atomic64_read(&ap_scan_bus_count));
+	return sysfs_emit(buf, "%llu\n", atomic64_read(&ap_scan_bus_count));
 }
 
 static ssize_t scans_store(const struct bus_type *bus, const char *buf,
@@ -1543,15 +1561,40 @@ static ssize_t bindings_show(const struct bus_type *bus, char *buf)
 
 	ap_calc_bound_apqns(&apqns, &n);
 	if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
-		rc = scnprintf(buf, PAGE_SIZE, "%u/%u (complete)\n", n, apqns);
+		rc = sysfs_emit(buf, "%u/%u (complete)\n", n, apqns);
 	else
-		rc = scnprintf(buf, PAGE_SIZE, "%u/%u\n", n, apqns);
+		rc = sysfs_emit(buf, "%u/%u\n", n, apqns);
 
 	return rc;
 }
 
 static BUS_ATTR_RO(bindings);
 
+static ssize_t features_show(const struct bus_type *bus, char *buf)
+{
+	int n = 0;
+
+	if (!ap_qci_info)	/* QCI not supported */
+		return sysfs_emit(buf, "-\n");
+
+	if (ap_qci_info->apsc)
+		n += sysfs_emit_at(buf, n, "APSC ");
+	if (ap_qci_info->apxa)
+		n += sysfs_emit_at(buf, n, "APXA ");
+	if (ap_qci_info->qact)
+		n += sysfs_emit_at(buf, n, "QACT ");
+	if (ap_qci_info->rc8a)
+		n += sysfs_emit_at(buf, n, "RC8A ");
+	if (ap_qci_info->apsb)
+		n += sysfs_emit_at(buf, n, "APSB ");
+
+	sysfs_emit_at(buf, n == 0 ? 0 : n - 1, "\n");
+
+	return n;
+}
+
+static BUS_ATTR_RO(features);
+
 static struct attribute *ap_bus_attrs[] = {
 	&bus_attr_ap_domain.attr,
 	&bus_attr_ap_control_domain_mask.attr,
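Note the trick on the last sysfs_emit_at() call in features_show(): every token is emitted as "NAME " and the final write lands at offset n - 1, overwriting the trailing blank with the newline. A runnable plain-C illustration of the same offset arithmetic (sprintf stands in for sysfs_emit_at):

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		int n = 0;

		n += sprintf(buf + n, "%s ", "APSC");
		n += sprintf(buf + n, "%s ", "APXA");
		sprintf(buf + (n == 0 ? 0 : n - 1), "\n");
		fputs(buf, stdout);	/* prints "APSC APXA" plus newline */
		return 0;
	}
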
@@ -1567,6 +1610,7 @@ static struct attribute *ap_bus_attrs[] = {
 	&bus_attr_aqmask.attr,
 	&bus_attr_scans.attr,
 	&bus_attr_bindings.attr,
+	&bus_attr_features.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(ap_bus);
@@ -1762,12 +1806,12 @@ static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
  */
 static inline void ap_scan_domains(struct ap_card *ac)
 {
-	bool decfg, chkstop;
-	ap_qid_t qid;
-	unsigned int func;
-	struct device *dev;
-	struct ap_queue *aq;
 	int rc, dom, depth, type, ml;
+	bool decfg, chkstop;
+	struct ap_queue *aq;
+	struct device *dev;
+	unsigned int func;
+	ap_qid_t qid;
 
 	/*
 	 * Go through the configuration for the domains and compare them
@@ -1786,20 +1830,24 @@ static inline void ap_scan_domains(struct ap_card *ac)
 				AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
 					    __func__, ac->id, dom);
 				device_unregister(dev);
-				put_device(dev);
 			}
-			continue;
+			goto put_dev_and_continue;
 		}
 		/* domain is valid, get info from this APQN */
-		if (!ap_queue_info(qid, &type, &func, &depth,
-				   &ml, &decfg, &chkstop)) {
-			if (aq) {
+		rc = ap_queue_info(qid, &type, &func, &depth,
+				   &ml, &decfg, &chkstop);
+		switch (rc) {
+		case -1:
+			if (dev) {
 				AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
 					    __func__, ac->id, dom);
 				device_unregister(dev);
 				put_device(dev);
 			}
-			continue;
+			fallthrough;
+		case 0:
+			goto put_dev_and_continue;
+		default:
+			break;
 		}
 		/* if no queue device exists, create a new one */
 		if (!aq) {
@@ -1915,12 +1963,12 @@ put_dev_and_continue:
  */
 static inline void ap_scan_adapter(int ap)
 {
-	bool decfg, chkstop;
-	ap_qid_t qid;
-	unsigned int func;
-	struct device *dev;
-	struct ap_card *ac;
 	int rc, dom, depth, type, comp_type, ml;
+	bool decfg, chkstop;
+	struct ap_card *ac;
+	struct device *dev;
+	unsigned int func;
+	ap_qid_t qid;
 
 	/* Is there currently a card device for this adapter ? */
 	dev = bus_find_device(&ap_bus_type, NULL,
@@ -1950,11 +1998,11 @@ static inline void ap_scan_adapter(int ap)
 		if (ap_test_config_usage_domain(dom)) {
 			qid = AP_MKQID(ap, dom);
 			if (ap_queue_info(qid, &type, &func, &depth,
-					  &ml, &decfg, &chkstop))
+					  &ml, &decfg, &chkstop) > 0)
 				break;
 		}
 	if (dom > ap_max_domain_id) {
-		/* Could not find a valid APQN for this adapter */
+		/* Could not find one valid APQN for this adapter */
 		if (ac) {
 			AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
 				    __func__, ap);
@@ -1979,7 +2027,6 @@ static inline void ap_scan_adapter(int ap)
 		}
 		return;
 	}
-
 	if (ac) {
 		/* Check APQN against existing card device for changes */
 		if (ac->raw_hwtype != type) {
@@ -1988,9 +2035,10 @@ static inline void ap_scan_adapter(int ap)
 			ap_scan_rm_card_dev_and_queue_devs(ac);
 			put_device(dev);
 			ac = NULL;
-		} else if (ac->functions != func) {
+		} else if ((ac->functions & TAPQ_CARD_FUNC_CMP_MASK) !=
+			   (func & TAPQ_CARD_FUNC_CMP_MASK)) {
 			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
-				    __func__, ap, type);
+				    __func__, ap, func);
 			ap_scan_rm_card_dev_and_queue_devs(ac);
 			put_device(dev);
 			ac = NULL;
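Masking with the upper 16 facility bits keeps transient low bits (for example secure-execution binding state) from triggering a pointless card rebuild; the hunk also fixes the log call, which passed `type` where the format string advertises the functions mask. A runnable sketch of the comparison (mask value as introduced in the ap_bus.h hunk below):

	#include <stdio.h>

	#define TAPQ_CARD_FUNC_CMP_MASK	0xFFFF0000u

	/* changed only if the card-relevant upper half differs */
	static int card_functions_changed(unsigned int old, unsigned int new)
	{
		return (old & TAPQ_CARD_FUNC_CMP_MASK) !=
		       (new & TAPQ_CARD_FUNC_CMP_MASK);
	}

	int main(void)
	{
		printf("%d\n", card_functions_changed(0x10020000, 0x10020001)); /* 0 */
		printf("%d\n", card_functions_changed(0x10020000, 0x10030000)); /* 1 */
		return 0;
	}
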
@@ -2245,7 +2293,7 @@ static int __init ap_module_init(void)
 	 * If we are running under z/VM adjust polling to z/VM polling rate.
 	 */
 	if (MACHINE_IS_VM)
-		poll_timeout = 1500000;
+		poll_high_timeout = 1500000;
 	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ap_poll_timer.function = ap_poll_timeout;
 

@@ -39,22 +39,32 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 	return (*ptr & (0x80000000u >> nr)) != 0;
 }
 
-#define AP_RESPONSE_NORMAL		0x00
-#define AP_RESPONSE_Q_NOT_AVAIL		0x01
-#define AP_RESPONSE_RESET_IN_PROGRESS	0x02
-#define AP_RESPONSE_DECONFIGURED	0x03
-#define AP_RESPONSE_CHECKSTOPPED	0x04
-#define AP_RESPONSE_BUSY		0x05
-#define AP_RESPONSE_INVALID_ADDRESS	0x06
-#define AP_RESPONSE_OTHERWISE_CHANGED	0x07
-#define AP_RESPONSE_INVALID_GISA	0x08
-#define AP_RESPONSE_Q_FULL		0x10
-#define AP_RESPONSE_NO_PENDING_REPLY	0x10
-#define AP_RESPONSE_INDEX_TOO_BIG	0x11
-#define AP_RESPONSE_NO_FIRST_PART	0x13
-#define AP_RESPONSE_MESSAGE_TOO_BIG	0x15
-#define AP_RESPONSE_REQ_FAC_NOT_INST	0x16
-#define AP_RESPONSE_INVALID_DOMAIN	0x42
+#define AP_RESPONSE_NORMAL			0x00
+#define AP_RESPONSE_Q_NOT_AVAIL			0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS		0x02
+#define AP_RESPONSE_DECONFIGURED		0x03
+#define AP_RESPONSE_CHECKSTOPPED		0x04
+#define AP_RESPONSE_BUSY			0x05
+#define AP_RESPONSE_INVALID_ADDRESS		0x06
+#define AP_RESPONSE_OTHERWISE_CHANGED		0x07
+#define AP_RESPONSE_INVALID_GISA		0x08
+#define AP_RESPONSE_Q_BOUND_TO_ANOTHER		0x09
+#define AP_RESPONSE_STATE_CHANGE_IN_PROGRESS	0x0A
+#define AP_RESPONSE_Q_NOT_BOUND			0x0B
+#define AP_RESPONSE_Q_FULL			0x10
+#define AP_RESPONSE_NO_PENDING_REPLY		0x10
+#define AP_RESPONSE_INDEX_TOO_BIG		0x11
+#define AP_RESPONSE_NO_FIRST_PART		0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG		0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST		0x16
+#define AP_RESPONSE_Q_BIND_ERROR		0x30
+#define AP_RESPONSE_Q_NOT_AVAIL_FOR_ASSOC	0x31
+#define AP_RESPONSE_Q_NOT_EMPTY			0x32
+#define AP_RESPONSE_BIND_LIMIT_EXCEEDED		0x33
+#define AP_RESPONSE_INVALID_ASSOC_SECRET	0x34
+#define AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE	0x35
+#define AP_RESPONSE_ASSOC_FAILED		0x36
+#define AP_RESPONSE_INVALID_DOMAIN		0x42
 
 /*
  * Known device types
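For context, ap_test_bit() in the hunk above numbers bits the z/Architecture way, most significant bit first, hence the 0x80000000u >> nr probe. A runnable illustration of MSB-0 numbering:

	#include <stdio.h>

	/* MSB-0: bit 0 is the leftmost bit of the 32-bit word */
	static int test_bit_msb0(unsigned int word, unsigned int nr)
	{
		return (word & (0x80000000u >> nr)) != 0;
	}

	int main(void)
	{
		unsigned int mask = 0x80000001u;

		printf("%d\n", test_bit_msb0(mask, 0));		/* 1: leftmost */
		printf("%d\n", test_bit_msb0(mask, 31));	/* 1: rightmost */
		printf("%d\n", test_bit_msb0(mask, 1));		/* 0 */
		return 0;
	}
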
@@ -92,6 +102,7 @@ enum ap_sm_state {
 	AP_SM_STATE_IDLE,
 	AP_SM_STATE_WORKING,
 	AP_SM_STATE_QUEUE_FULL,
+	AP_SM_STATE_ASSOC_WAIT,
 	NR_AP_SM_STATES
 };
 
@@ -108,10 +119,11 @@ enum ap_sm_event {
  * AP queue state wait behaviour
  */
 enum ap_sm_wait {
-	AP_SM_WAIT_AGAIN = 0,	/* retry immediately */
-	AP_SM_WAIT_TIMEOUT,	/* wait for timeout */
-	AP_SM_WAIT_INTERRUPT,	/* wait for thin interrupt (if available) */
-	AP_SM_WAIT_NONE,	/* no wait */
+	AP_SM_WAIT_AGAIN = 0,		/* retry immediately */
+	AP_SM_WAIT_HIGH_TIMEOUT,	/* poll high freq, wait for timeout */
+	AP_SM_WAIT_LOW_TIMEOUT,		/* poll low freq, wait for timeout */
+	AP_SM_WAIT_INTERRUPT,		/* wait for thin interrupt (if available) */
+	AP_SM_WAIT_NONE,		/* no wait */
 	NR_AP_SM_WAIT
 };
 
@@ -178,7 +190,7 @@ struct ap_device {
 struct ap_card {
 	struct ap_device ap_dev;
 	int raw_hwtype;			/* AP raw hardware type. */
-	unsigned int functions;		/* AP device function bitfield. */
+	unsigned int functions;		/* TAPQ GR2 upper 32 facility bits */
 	int queue_depth;		/* AP queue depth.*/
 	int id;				/* AP card number. */
 	unsigned int maxmsgsize;	/* AP msg limit for this card */
@@ -187,6 +199,9 @@ struct ap_card {
 	atomic64_t total_request_count;	/* # requests ever for this AP device.*/
 };
 
+#define TAPQ_CARD_FUNC_CMP_MASK	0xFFFF0000
+#define ASSOC_IDX_INVALID	0x10000
+
 #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
 
 struct ap_queue {
@@ -199,6 +214,7 @@ struct ap_queue {
 	bool chkstop;			/* checkstop state */
 	ap_qid_t qid;			/* AP queue id. */
 	bool interrupt;			/* indicate if interrupts are enabled */
+	unsigned int assoc_idx;		/* SE association index */
 	int queue_count;		/* # messages currently on AP queue. */
 	int pendingq_count;		/* # requests on pendingq list. */
 	int requestq_count;		/* # requests on requestq list. */
@@ -209,6 +225,7 @@ struct ap_queue {
 	struct list_head requestq;	/* List of message yet to be sent. */
 	struct ap_message *reply;	/* Per device reply message. */
 	enum ap_sm_state sm_state;	/* ap queue state machine state */
+	int rapq_fbit;			/* fbit arg for next rapq invocation */
 	int last_err_rc;		/* last error state response code */
 };
 
@@ -242,10 +259,10 @@ enum ap_fi_flags {
 
 struct ap_message {
 	struct list_head list;		/* Request queueing. */
-	unsigned long long psmid;	/* Message id. */
+	unsigned long psmid;		/* Message id. */
 	void *msg;			/* Pointer to message buffer. */
-	unsigned int len;		/* actual msg len in msg buffer */
-	unsigned int bufsize;		/* allocated msg buffer size */
+	size_t len;			/* actual msg len in msg buffer */
+	size_t bufsize;			/* allocated msg buffer size */
 	u16 flags;			/* Flags, see AP_MSG_FLAG_xxx */
 	struct ap_fi fi;		/* Failure Injection cmd */
 	int rc;				/* Return code for this message */
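On s390, unsigned long is already 64 bits wide, so narrowing psmid from unsigned long long costs no range; what it buys is format-string consistency (the %016llx vs %016lx fix in the ap_sm_recv() hunk below) and size_t for the length fields. A one-line compile-time check of the assumption, valid for any LP64 target:

	#include <assert.h>

	/* LP64 (s390x included): unsigned long carries the full 64-bit psmid */
	static_assert(sizeof(unsigned long) == 8, "64-bit target assumed");

	int main(void)
	{
		return 0;
	}
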
@@ -285,8 +302,8 @@ static inline void ap_release_message(struct ap_message *ap_msg)
  * for the first time. Otherwise the ap message queue will get
  * confused.
  */
-int ap_send(ap_qid_t, unsigned long long, void *, size_t);
-int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
+int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen);
+int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen);
 
 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
@@ -296,6 +313,7 @@ void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
 void ap_flush_queue(struct ap_queue *aq);
 
 void *ap_airq_ptr(void);
+int ap_sb_available(void);
 void ap_wait(enum ap_sm_wait wait);
 void ap_request_timeout(struct timer_list *t);
 void ap_bus_force_rescan(void);

@@ -24,7 +24,7 @@ static ssize_t hwtype_show(struct device *dev,
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+	return sysfs_emit(buf, "%d\n", ac->ap_dev.device_type);
 }
 
 static DEVICE_ATTR_RO(hwtype);
@@ -34,7 +34,7 @@ static ssize_t raw_hwtype_show(struct device *dev,
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+	return sysfs_emit(buf, "%d\n", ac->raw_hwtype);
 }
 
 static DEVICE_ATTR_RO(raw_hwtype);
@@ -44,7 +44,7 @@ static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+	return sysfs_emit(buf, "%d\n", ac->queue_depth);
 }
 
 static DEVICE_ATTR_RO(depth);
@@ -54,7 +54,7 @@ static ssize_t ap_functions_show(struct device *dev,
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+	return sysfs_emit(buf, "0x%08X\n", ac->functions);
 }
 
 static DEVICE_ATTR_RO(ap_functions);
@@ -70,7 +70,7 @@ static ssize_t request_count_show(struct device *dev,
 	spin_lock_bh(&ap_queues_lock);
 	req_cnt = atomic64_read(&ac->total_request_count);
 	spin_unlock_bh(&ap_queues_lock);
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+	return sysfs_emit(buf, "%llu\n", req_cnt);
 }
 
 static ssize_t request_count_store(struct device *dev,
@@ -107,7 +107,7 @@ static ssize_t requestq_count_show(struct device *dev,
 		if (ac == aq->card)
 			reqq_cnt += aq->requestq_count;
 	spin_unlock_bh(&ap_queues_lock);
-	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+	return sysfs_emit(buf, "%d\n", reqq_cnt);
 }
 
 static DEVICE_ATTR_RO(requestq_count);
@@ -126,7 +126,7 @@ static ssize_t pendingq_count_show(struct device *dev,
 		if (ac == aq->card)
 			penq_cnt += aq->pendingq_count;
 	spin_unlock_bh(&ap_queues_lock);
-	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+	return sysfs_emit(buf, "%d\n", penq_cnt);
 }
 
 static DEVICE_ATTR_RO(pendingq_count);
@@ -134,8 +134,7 @@ static DEVICE_ATTR_RO(pendingq_count);
 static ssize_t modalias_show(struct device *dev,
 			     struct device_attribute *attr, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "ap:t%02X\n",
-			 to_ap_dev(dev)->device_type);
+	return sysfs_emit(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
 }
 
 static DEVICE_ATTR_RO(modalias);
@@ -145,7 +144,7 @@ static ssize_t config_show(struct device *dev,
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->config ? 1 : 0);
+	return sysfs_emit(buf, "%d\n", ac->config ? 1 : 0);
 }
 
 static ssize_t config_store(struct device *dev,
@@ -179,7 +178,7 @@ static ssize_t chkstop_show(struct device *dev,
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->chkstop ? 1 : 0);
+	return sysfs_emit(buf, "%d\n", ac->chkstop ? 1 : 0);
 }
 
 static DEVICE_ATTR_RO(chkstop);
@@ -189,7 +188,7 @@ static ssize_t max_msg_size_show(struct device *dev,
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%u\n", ac->maxmsgsize);
+	return sysfs_emit(buf, "%u\n", ac->maxmsgsize);
 }
 
 static DEVICE_ATTR_RO(max_msg_size);

@@ -18,6 +18,21 @@
 
 static void __ap_flush_queue(struct ap_queue *aq);
 
+/*
+ * some AP queue helper functions
+ */
+
+static inline bool ap_q_supports_bind(struct ap_queue *aq)
+{
+	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
+	       ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
+}
+
+static inline bool ap_q_supports_assoc(struct ap_queue *aq)
+{
+	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
+}
+
 /**
  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
  * @aq: The AP queue
@@ -35,6 +50,8 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
 	qirqctrl.ir = 1;
 	qirqctrl.isc = AP_ISC;
 	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
+	if (status.async)
+		return -EPERM;
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 	case AP_RESPONSE_OTHERWISE_CHANGED:
@@ -59,7 +76,7 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
- * @length: The message length
+ * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
@@ -68,19 +85,21 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
 * because a segment boundary was reached. The NQAP is repeated.
 */
 static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
 {
 	if (special)
 		qid |= 0x400000UL;
-	return ap_nqap(qid, psmid, msg, length);
+	return ap_nqap(qid, psmid, msg, msglen);
 }
 
-int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen)
 {
 	struct ap_queue_status status;
 
-	status = __ap_send(qid, psmid, msg, length, 0);
+	status = __ap_send(qid, psmid, msg, msglen, 0);
+	if (status.async)
+		return -EPERM;
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		return 0;
@@ -95,13 +114,15 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
 }
 EXPORT_SYMBOL(ap_send);
 
-int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen)
 {
 	struct ap_queue_status status;
 
 	if (!msg)
 		return -EINVAL;
-	status = ap_dqap(qid, psmid, msg, length, NULL, NULL);
+	status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL);
+	if (status.async)
+		return -EPERM;
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		return 0;
@@ -150,7 +171,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 	do {
 		status = ap_dqap(aq->qid, &aq->reply->psmid,
 				 aq->reply->msg, aq->reply->bufsize,
-				 &reslen, &resgr0);
+				 &aq->reply->len, &reslen, &resgr0);
 		parts++;
 	} while (status.response_code == 0xFF && resgr0 != 0);
 
@@ -177,7 +198,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 			break;
 		}
 	if (!found) {
-		AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+		AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
 			    __func__, aq->reply->psmid,
 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
 	}
@@ -210,6 +231,8 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
 	if (!aq->reply)
 		return AP_SM_WAIT_NONE;
 	status = ap_sm_recv(aq);
+	if (status.async)
+		return AP_SM_WAIT_NONE;
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		if (aq->queue_count > 0) {
@@ -221,7 +244,7 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
 	case AP_RESPONSE_NO_PENDING_REPLY:
 		if (aq->queue_count > 0)
 			return aq->interrupt ?
-				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
+				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
 		aq->sm_state = AP_SM_STATE_IDLE;
 		return AP_SM_WAIT_NONE;
 	default:
@@ -261,6 +284,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
 	status = __ap_send(qid, ap_msg->psmid,
 			   ap_msg->msg, ap_msg->len,
 			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
+	if (status.async)
+		return AP_SM_WAIT_NONE;
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
@@ -277,10 +302,10 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
 	case AP_RESPONSE_Q_FULL:
 		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
 		return aq->interrupt ?
-			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
+			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
 	case AP_RESPONSE_RESET_IN_PROGRESS:
 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
-		return AP_SM_WAIT_TIMEOUT;
+		return AP_SM_WAIT_LOW_TIMEOUT;
 	case AP_RESPONSE_INVALID_DOMAIN:
 		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
 		fallthrough;
@@ -322,13 +347,16 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 
-	status = ap_rapq(aq->qid);
+	status = ap_rapq(aq->qid, aq->rapq_fbit);
+	if (status.async)
+		return AP_SM_WAIT_NONE;
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
 		aq->interrupt = false;
-		return AP_SM_WAIT_TIMEOUT;
+		aq->rapq_fbit = 0;
+		return AP_SM_WAIT_LOW_TIMEOUT;
 	default:
 		aq->dev_state = AP_DEV_STATE_ERROR;
 		aq->last_err_rc = status.response_code;
@@ -368,7 +396,7 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
 		return AP_SM_WAIT_AGAIN;
 	case AP_RESPONSE_BUSY:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return AP_SM_WAIT_TIMEOUT;
+		return AP_SM_WAIT_LOW_TIMEOUT;
 	case AP_RESPONSE_Q_NOT_AVAIL:
 	case AP_RESPONSE_DECONFIGURED:
 	case AP_RESPONSE_CHECKSTOPPED:
@@ -412,7 +440,7 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 			return AP_SM_WAIT_AGAIN;
 		fallthrough;
 	case AP_RESPONSE_NO_PENDING_REPLY:
-		return AP_SM_WAIT_TIMEOUT;
+		return AP_SM_WAIT_LOW_TIMEOUT;
 	default:
 		aq->dev_state = AP_DEV_STATE_ERROR;
 		aq->last_err_rc = status.response_code;
@@ -423,6 +451,59 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 	}
 }
 
+/**
+ * ap_sm_assoc_wait(): Test queue for completion of a pending
+ *		       association request.
+ * @aq: pointer to the AP queue
+ */
+static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+	struct ap_tapq_gr2 info;
+
+	status = ap_test_queue(aq->qid, 1, &info);
+	/* handle asynchronous error on this queue */
+	if (status.async && status.response_code) {
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
+	}
+	if (status.response_code > AP_RESPONSE_BUSY) {
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
+	}
+
+	/* check bs bits */
+	switch (info.bs) {
+	case AP_BS_Q_USABLE:
+		/* association is through */
+		aq->sm_state = AP_SM_STATE_IDLE;
+		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
+			   __func__, AP_QID_CARD(aq->qid),
+			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
+		return AP_SM_WAIT_NONE;
+	case AP_BS_Q_USABLE_NO_SECURE_KEY:
+		/* association still pending */
+		return AP_SM_WAIT_LOW_TIMEOUT;
+	default:
+		/* reset from 'outside' happened or no idea at all */
+		aq->assoc_idx = ASSOC_IDX_INVALID;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, info.bs,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
+	}
+}
+
 /*
 * AP state machine jump table
 */
@@ -451,6 +532,10 @@ static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
 		[AP_SM_EVENT_POLL] = ap_sm_read,
 		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
 	},
+	[AP_SM_STATE_ASSOC_WAIT] = {
+		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
+	},
 };
 
 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
@@ -490,9 +575,9 @@ static ssize_t request_count_show(struct device *dev,
 	spin_unlock_bh(&aq->lock);
 
 	if (valid)
-		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+		return sysfs_emit(buf, "%llu\n", req_cnt);
 	else
-		return scnprintf(buf, PAGE_SIZE, "-\n");
+		return sysfs_emit(buf, "-\n");
 }
 
 static ssize_t request_count_store(struct device *dev,
@@ -520,7 +605,7 @@ static ssize_t requestq_count_show(struct device *dev,
 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
 		reqq_cnt = aq->requestq_count;
 	spin_unlock_bh(&aq->lock);
-	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+	return sysfs_emit(buf, "%d\n", reqq_cnt);
 }
 
 static DEVICE_ATTR_RO(requestq_count);
@@ -535,7 +620,7 @@ static ssize_t pendingq_count_show(struct device *dev,
 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
 		penq_cnt = aq->pendingq_count;
 	spin_unlock_bh(&aq->lock);
-	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+	return sysfs_emit(buf, "%d\n", penq_cnt);
 }
 
 static DEVICE_ATTR_RO(pendingq_count);
@@ -550,14 +635,14 @@ static ssize_t reset_show(struct device *dev,
 	switch (aq->sm_state) {
 	case AP_SM_STATE_RESET_START:
 	case AP_SM_STATE_RESET_WAIT:
-		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+		rc = sysfs_emit(buf, "Reset in progress.\n");
 		break;
 	case AP_SM_STATE_WORKING:
 	case AP_SM_STATE_QUEUE_FULL:
-		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+		rc = sysfs_emit(buf, "Reset Timer armed.\n");
 		break;
 	default:
-		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+		rc = sysfs_emit(buf, "No Reset Timer set.\n");
 	}
 	spin_unlock_bh(&aq->lock);
 	return rc;
@@ -591,11 +676,11 @@ static ssize_t interrupt_show(struct device *dev,
 
 	spin_lock_bh(&aq->lock);
 	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
-		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
 	else if (aq->interrupt)
-		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+		rc = sysfs_emit(buf, "Interrupts enabled.\n");
 	else
-		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+		rc = sysfs_emit(buf, "Interrupts disabled.\n");
 	spin_unlock_bh(&aq->lock);
 	return rc;
 }
@@ -609,7 +694,7 @@ static ssize_t config_show(struct device *dev,
 	int rc;
 
 	spin_lock_bh(&aq->lock);
-	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
+	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
 	spin_unlock_bh(&aq->lock);
 	return rc;
 }
@@ -623,13 +708,33 @@ static ssize_t chkstop_show(struct device *dev,
 	int rc;
 
 	spin_lock_bh(&aq->lock);
-	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->chkstop ? 1 : 0);
+	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
 	spin_unlock_bh(&aq->lock);
 	return rc;
 }
 
 static DEVICE_ATTR_RO(chkstop);
 
+static ssize_t ap_functions_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct ap_queue_status status;
+	struct ap_tapq_gr2 info;
+
+	status = ap_test_queue(aq->qid, 1, &info);
+	if (status.response_code > AP_RESPONSE_BUSY) {
+		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+			   __func__, status.response_code,
+			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return -EIO;
+	}
+
+	return sysfs_emit(buf, "0x%08X\n", info.fac);
+}
+
+static DEVICE_ATTR_RO(ap_functions);
+
 #ifdef CONFIG_ZCRYPT_DEBUG
 static ssize_t states_show(struct device *dev,
 			   struct device_attribute *attr, char *buf)
@@ -641,50 +746,46 @@ static ssize_t states_show(struct device *dev,
 	/* queue device state */
 	switch (aq->dev_state) {
 	case AP_DEV_STATE_UNINITIATED:
-		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
+		rc = sysfs_emit(buf, "UNINITIATED\n");
 		break;
 	case AP_DEV_STATE_OPERATING:
-		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
+		rc = sysfs_emit(buf, "OPERATING");
 		break;
 	case AP_DEV_STATE_SHUTDOWN:
-		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
+		rc = sysfs_emit(buf, "SHUTDOWN");
 		break;
 	case AP_DEV_STATE_ERROR:
-		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
+		rc = sysfs_emit(buf, "ERROR");
 		break;
 	default:
-		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
+		rc = sysfs_emit(buf, "UNKNOWN");
 	}
 	/* state machine state */
 	if (aq->dev_state) {
 		switch (aq->sm_state) {
 		case AP_SM_STATE_RESET_START:
-			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
-					" [RESET_START]\n");
+			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
 			break;
 		case AP_SM_STATE_RESET_WAIT:
-			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
-					" [RESET_WAIT]\n");
+			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
 			break;
 		case AP_SM_STATE_SETIRQ_WAIT:
-			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
-					" [SETIRQ_WAIT]\n");
+			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
 			break;
 		case AP_SM_STATE_IDLE:
-			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
-					" [IDLE]\n");
+			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
 			break;
 		case AP_SM_STATE_WORKING:
-			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
-					" [WORKING]\n");
+			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
 			break;
 		case AP_SM_STATE_QUEUE_FULL:
-			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
-					" [FULL]\n");
+			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
 			break;
+		case AP_SM_STATE_ASSOC_WAIT:
+			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
+			break;
 		default:
-			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
-					" [UNKNOWN]\n");
+			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
 		}
 	}
 	spin_unlock_bh(&aq->lock);
@@ -705,33 +806,33 @@ static ssize_t last_err_rc_show(struct device *dev,
 
 	switch (rc) {
 	case AP_RESPONSE_NORMAL:
-		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
+		return sysfs_emit(buf, "NORMAL\n");
 	case AP_RESPONSE_Q_NOT_AVAIL:
-		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
+		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
 	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
+		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
 	case AP_RESPONSE_DECONFIGURED:
-		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
+		return sysfs_emit(buf, "DECONFIGURED\n");
 	case AP_RESPONSE_CHECKSTOPPED:
-		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
+		return sysfs_emit(buf, "CHECKSTOPPED\n");
 	case AP_RESPONSE_BUSY:
-		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
+		return sysfs_emit(buf, "BUSY\n");
 	case AP_RESPONSE_INVALID_ADDRESS:
-		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
+		return sysfs_emit(buf, "INVALID_ADDRESS\n");
 	case AP_RESPONSE_OTHERWISE_CHANGED:
-		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
+		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
 	case AP_RESPONSE_Q_FULL:
-		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
+		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
 	case AP_RESPONSE_INDEX_TOO_BIG:
-		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
+		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
 	case AP_RESPONSE_NO_FIRST_PART:
-		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
+		return sysfs_emit(buf, "NO_FIRST_PART\n");
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
-		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
+		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
 	case AP_RESPONSE_REQ_FAC_NOT_INST:
-		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
+		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
 	default:
-		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
+		return sysfs_emit(buf, "response code %d\n", rc);
 	}
 }
 static DEVICE_ATTR_RO(last_err_rc);
@@ -745,6 +846,7 @@ static struct attribute *ap_queue_dev_attrs[] = {
 	&dev_attr_interrupt.attr,
 	&dev_attr_config.attr,
 	&dev_attr_chkstop.attr,
+	&dev_attr_ap_functions.attr,
 #ifdef CONFIG_ZCRYPT_DEBUG
 	&dev_attr_states.attr,
 	&dev_attr_last_err_rc.attr,
@@ -766,6 +868,186 @@ static struct device_type ap_queue_type = {
 	.groups = ap_queue_dev_attr_groups,
 };
 
+static ssize_t se_bind_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct ap_queue_status status;
+	struct ap_tapq_gr2 info;
+
+	if (!ap_q_supports_bind(aq))
+		return sysfs_emit(buf, "-\n");
+
+	status = ap_test_queue(aq->qid, 1, &info);
+	if (status.response_code > AP_RESPONSE_BUSY) {
+		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+			   __func__, status.response_code,
+			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return -EIO;
+	}
+	switch (info.bs) {
+	case AP_BS_Q_USABLE:
+	case AP_BS_Q_USABLE_NO_SECURE_KEY:
+		return sysfs_emit(buf, "bound\n");
+	default:
+		return sysfs_emit(buf, "unbound\n");
+	}
+}
+
+static ssize_t se_bind_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct ap_queue_status status;
+	bool value;
+	int rc;
+
+	if (!ap_q_supports_bind(aq))
+		return -EINVAL;
+
+	/* only 0 (unbind) and 1 (bind) allowed */
+	rc = kstrtobool(buf, &value);
+	if (rc)
+		return rc;
+
+	if (value) {
+		/* bind, do BAPQ */
+		spin_lock_bh(&aq->lock);
+		if (aq->sm_state < AP_SM_STATE_IDLE) {
+			spin_unlock_bh(&aq->lock);
+			return -EBUSY;
+		}
+		status = ap_bapq(aq->qid);
+		spin_unlock_bh(&aq->lock);
+		if (status.response_code) {
+			AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
+				    __func__, status.response_code,
+				    AP_QID_CARD(aq->qid),
+				    AP_QID_QUEUE(aq->qid));
+			return -EIO;
+		}
+	} else {
+		/* unbind, set F bit arg and trigger RAPQ */
+		spin_lock_bh(&aq->lock);
+		__ap_flush_queue(aq);
+		aq->rapq_fbit = 1;
+		aq->assoc_idx = ASSOC_IDX_INVALID;
+		aq->sm_state = AP_SM_STATE_RESET_START;
+		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+		spin_unlock_bh(&aq->lock);
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(se_bind);
+
+static ssize_t se_associate_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct ap_queue_status status;
+	struct ap_tapq_gr2 info;
+
+	if (!ap_q_supports_assoc(aq))
+		return sysfs_emit(buf, "-\n");
+
+	status = ap_test_queue(aq->qid, 1, &info);
+	if (status.response_code > AP_RESPONSE_BUSY) {
+		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+			   __func__, status.response_code,
+			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return -EIO;
+	}
+
+	switch (info.bs) {
+	case AP_BS_Q_USABLE:
+		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
+			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
+			return -EIO;
+		}
+		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
+	case AP_BS_Q_USABLE_NO_SECURE_KEY:
+		if (aq->assoc_idx != ASSOC_IDX_INVALID)
+			return sysfs_emit(buf, "association pending\n");
+		fallthrough;
+	default:
+		return sysfs_emit(buf, "unassociated\n");
+	}
+}
+
+static ssize_t se_associate_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct ap_queue_status status;
+	unsigned int value;
+	int rc;
+
+	if (!ap_q_supports_assoc(aq))
+		return -EINVAL;
+
+	/* association index needs to be >= 0 */
+	rc = kstrtouint(buf, 0, &value);
+	if (rc)
+		return rc;
+	if (value >= ASSOC_IDX_INVALID)
+		return -EINVAL;
+
+	spin_lock_bh(&aq->lock);
+
+	/* sm should be in idle state */
+	if (aq->sm_state != AP_SM_STATE_IDLE) {
+		spin_unlock_bh(&aq->lock);
+		return -EBUSY;
+	}
+
+	/* already associated or association pending ? */
+	if (aq->assoc_idx != ASSOC_IDX_INVALID) {
+		spin_unlock_bh(&aq->lock);
+		return -EINVAL;
+	}
+
+	/* trigger the asynchronous association request */
+	status = ap_aapq(aq->qid, value);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
+		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
+		aq->assoc_idx = value;
+		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+		spin_unlock_bh(&aq->lock);
+		break;
+	default:
+		spin_unlock_bh(&aq->lock);
+		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return -EIO;
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(se_associate);
+
+static struct attribute *ap_queue_dev_sb_attrs[] = {
+	&dev_attr_se_bind.attr,
+	&dev_attr_se_associate.attr,
+	NULL
+};
+
+static struct attribute_group ap_queue_dev_sb_attr_group = {
+	.attrs = ap_queue_dev_sb_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
+	&ap_queue_dev_sb_attr_group,
+	NULL
+};
+
 static void ap_queue_device_release(struct device *dev)
 {
 	struct ap_queue *aq = to_ap_queue(dev);
@@ -787,6 +1069,9 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
 	aq->ap_dev.device.release = ap_queue_device_release;
 	aq->ap_dev.device.type = &ap_queue_type;
 	aq->ap_dev.device_type = device_type;
+	// add optional SE secure binding attributes group
+	if (ap_sb_available() && is_prot_virt_guest())
+		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
 	aq->qid = qid;
 	aq->interrupt = false;
 	spin_lock_init(&aq->lock);
@@ -922,7 +1207,7 @@ void ap_queue_remove(struct ap_queue *aq)
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
|
||||
ap_zapq(aq->qid);
|
||||
ap_zapq(aq->qid, 0);
|
||||
aq->dev_state = AP_DEV_STATE_UNINITIATED;
|
||||
spin_unlock_bh(&aq->lock);
|
||||
}
|
||||
|
@ -933,6 +1218,7 @@ void ap_queue_init_state(struct ap_queue *aq)
|
|||
aq->dev_state = AP_DEV_STATE_OPERATING;
|
||||
aq->sm_state = AP_SM_STATE_RESET_START;
|
||||
aq->last_err_rc = 0;
|
||||
aq->assoc_idx = ASSOC_IDX_INVALID;
|
||||
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
|
||||
spin_unlock_bh(&aq->lock);
|
||||
}
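
The new se_bind and se_associate attributes are plain sysfs files, so a protected-virtualization guest can drive secure-execution AP binding with ordinary file I/O. A minimal user-space sketch, for illustration only (the queue directory 04.0005, the association index and the trimmed error handling are made up, not part of this series):

/* Illustrative sketch: bind an AP queue and request an association via
 * the sysfs attributes added above. The path below is an example; real
 * queue directories are named after the APQN. */
#include <stdio.h>
#include <stdlib.h>

static void write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f || fputs(val, f) == EOF) {
		perror(path);
		exit(1);
	}
	fclose(f);
}

int main(void)
{
	const char *q = "/sys/devices/ap/card04/04.0005";	/* example APQN */
	char path[256], state[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/se_bind", q);
	write_attr(path, "1");		/* bind: driver issues BAPQ */

	snprintf(path, sizeof(path), "%s/se_associate", q);
	write_attr(path, "42");		/* request association index 42 */

	snprintf(path, sizeof(path), "%s/se_bind", q);
	f = fopen(path, "r");
	if (f && fgets(state, sizeof(state), f))
		printf("queue state: %s", state);	/* "bound" or "unbound" */
	if (f)
		fclose(f);
	return 0;
}
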
@@ -60,14 +60,8 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
 	kfree(matrix_dev);
 }
 
-static int matrix_bus_match(struct device *dev, struct device_driver *drv)
-{
-	return 1;
-}
-
 static struct bus_type matrix_bus = {
 	.name = "matrix",
-	.match = &matrix_bus_match,
 };
 
 static struct device_driver matrix_driver = {
@@ -599,9 +599,9 @@ out_unlock:
 static void vfio_ap_matrix_init(struct ap_config_info *info,
 				struct ap_matrix *matrix)
 {
-	matrix->apm_max = info->apxa ? info->Na : 63;
-	matrix->aqm_max = info->apxa ? info->Nd : 15;
-	matrix->adm_max = info->apxa ? info->Nd : 15;
+	matrix->apm_max = info->apxa ? info->na : 63;
+	matrix->aqm_max = info->apxa ? info->nd : 15;
+	matrix->adm_max = info->apxa ? info->nd : 15;
 }
 
 static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
@@ -1657,7 +1657,7 @@ static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
 	if (!q)
 		return 0;
 retry_zapq:
-	status = ap_zapq(q->apqn);
+	status = ap_zapq(q->apqn, 0);
 	q->reset_rc = status.response_code;
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
@@ -2115,8 +2115,8 @@ static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
 {
 	bool apid_cleared;
 	struct ap_queue_status status;
-	unsigned long apid, apqi, info;
-	int qtype, qtype_mask = 0xff000000;
+	unsigned long apid, apqi;
+	struct ap_tapq_gr2 info;
 
 	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
 		apid_cleared = false;
@@ -2133,15 +2133,13 @@ static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
 			case AP_RESPONSE_DECONFIGURED:
 			case AP_RESPONSE_CHECKSTOPPED:
 			case AP_RESPONSE_BUSY:
-				qtype = info & qtype_mask;
-
 				/*
 				 * The vfio_ap device driver only
 				 * supports CEX4 and newer adapters, so
 				 * remove the APID if the adapter is
 				 * older than a CEX4.
 				 */
-				if (qtype < AP_DEVICE_TYPE_CEX4) {
+				if (info.at < AP_DEVICE_TYPE_CEX4) {
 					clear_bit_inv(apid, apm);
 					apid_cleared = true;
 				}
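
These hunks replace open-coded masking of the TAPQ GR2 word (qtype = info & 0xff000000) with named fields of struct ap_tapq_gr2 (info.at, info.bs). A toy illustration of the general pattern, mapping a register word onto a struct, follows; the field layout here is invented for demonstration and is not the real GR2 layout (the real definition also has to fix bit numbering and byte order):

/* Illustrative only: read named fields out of a raw register word by
 * overlaying a struct instead of mask-and-shift arithmetic. Assumes a
 * 64-bit unsigned long; the layout is hypothetical. */
#include <stdio.h>
#include <string.h>

struct example_gr2 {
	unsigned char at;	/* "adapter type", hypothetical position */
	unsigned char bs;	/* "bind state", hypothetical position */
	unsigned short flags;
	unsigned int rest;
};

int main(void)
{
	unsigned long raw = 0x0702001000000000UL;	/* fake register value */
	struct example_gr2 info;

	memcpy(&info, &raw, sizeof(info));	/* reinterpret the word */
	printf("adapter type 0x%02x, bind state 0x%02x\n",
	       (unsigned int)info.at, (unsigned int)info.bs);
	return 0;
}
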
@@ -159,25 +159,20 @@ static ssize_t ioctlmask_show(struct device *dev,
 			      struct device_attribute *attr,
 			      char *buf)
 {
-	int i, rc;
 	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+	int i, n;
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
 
-	buf[0] = '0';
-	buf[1] = 'x';
+	n = sysfs_emit(buf, "0x");
 	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
-		snprintf(buf + 2 + 2 * i * sizeof(long),
-			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
-			 "%016lx", zcdndev->perms.ioctlm[i]);
-	buf[2 + 2 * i * sizeof(long)] = '\n';
-	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
-	rc = 2 + 2 * i * sizeof(long) + 1;
+		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
+	n += sysfs_emit_at(buf, n, "\n");
 
 	mutex_unlock(&ap_perms_mutex);
 
-	return rc;
+	return n;
 }
 
 static ssize_t ioctlmask_store(struct device *dev,
@@ -201,25 +196,20 @@ static ssize_t apmask_show(struct device *dev,
 			   struct device_attribute *attr,
 			   char *buf)
 {
-	int i, rc;
 	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+	int i, n;
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
 
-	buf[0] = '0';
-	buf[1] = 'x';
+	n = sysfs_emit(buf, "0x");
 	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
-		snprintf(buf + 2 + 2 * i * sizeof(long),
-			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
-			 "%016lx", zcdndev->perms.apm[i]);
-	buf[2 + 2 * i * sizeof(long)] = '\n';
-	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
-	rc = 2 + 2 * i * sizeof(long) + 1;
+		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
+	n += sysfs_emit_at(buf, n, "\n");
 
 	mutex_unlock(&ap_perms_mutex);
 
-	return rc;
+	return n;
 }
 
 static ssize_t apmask_store(struct device *dev,
@@ -243,25 +233,20 @@ static ssize_t aqmask_show(struct device *dev,
 			   struct device_attribute *attr,
 			   char *buf)
 {
-	int i, rc;
 	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+	int i, n;
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
 
-	buf[0] = '0';
-	buf[1] = 'x';
+	n = sysfs_emit(buf, "0x");
 	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
-		snprintf(buf + 2 + 2 * i * sizeof(long),
-			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
-			 "%016lx", zcdndev->perms.aqm[i]);
-	buf[2 + 2 * i * sizeof(long)] = '\n';
-	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
-	rc = 2 + 2 * i * sizeof(long) + 1;
+		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
+	n += sysfs_emit_at(buf, n, "\n");
 
 	mutex_unlock(&ap_perms_mutex);
 
-	return rc;
+	return n;
 }
 
 static ssize_t aqmask_store(struct device *dev,
@@ -285,25 +270,20 @@ static ssize_t admask_show(struct device *dev,
 			   struct device_attribute *attr,
 			   char *buf)
 {
-	int i, rc;
 	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+	int i, n;
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
 
-	buf[0] = '0';
-	buf[1] = 'x';
+	n = sysfs_emit(buf, "0x");
 	for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
-		snprintf(buf + 2 + 2 * i * sizeof(long),
-			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
-			 "%016lx", zcdndev->perms.adm[i]);
-	buf[2 + 2 * i * sizeof(long)] = '\n';
-	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
-	rc = 2 + 2 * i * sizeof(long) + 1;
+		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
+	n += sysfs_emit_at(buf, n, "\n");
 
 	mutex_unlock(&ap_perms_mutex);
 
-	return rc;
+	return n;
 }
 
 static ssize_t admask_store(struct device *dev,
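
All four mask attributes above move from hand-maintained buffer offsets to sysfs_emit()/sysfs_emit_at(), which clamp output to one page and return the number of bytes written, so the show() routine only accumulates n. A user-space model of that accumulation pattern follows; emit_at() here is a stand-in built on vsnprintf, not the kernel helper's real implementation:

/* Illustrative model of the sysfs_emit_at() pattern: each call appends
 * at offset 'at' and returns the bytes added. */
#include <stdarg.h>
#include <stdio.h>

#define BUFSZ 4096	/* stand-in for PAGE_SIZE */

static int emit_at(char *buf, int at, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf + at, BUFSZ - at, fmt, ap);
	va_end(ap);
	return n;
}

int main(void)
{
	unsigned long masks[4] = { 0xffff000000000000UL, 0, 0, 1 };
	char buf[BUFSZ];
	int i, n;

	n = emit_at(buf, 0, "0x");	/* like n = sysfs_emit(buf, "0x") */
	for (i = 0; i < 4; i++)
		n += emit_at(buf, n, "%016lx", masks[i]);
	n += emit_at(buf, n, "\n");
	fputs(buf, stdout);		/* a show() routine would return n */
	return 0;
}
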
@@ -41,7 +41,7 @@ static ssize_t type_show(struct device *dev,
 {
 	struct zcrypt_card *zc = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+	return sysfs_emit(buf, "%s\n", zc->type_string);
 }
 
 static DEVICE_ATTR_RO(type);
@@ -54,7 +54,7 @@ static ssize_t online_show(struct device *dev,
 	struct ap_card *ac = to_ap_card(dev);
 	int online = ac->config && zc->online ? 1 : 0;
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+	return sysfs_emit(buf, "%d\n", online);
 }
 
 static ssize_t online_store(struct device *dev,
@@ -118,7 +118,7 @@ static ssize_t load_show(struct device *dev,
 {
 	struct zcrypt_card *zc = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+	return sysfs_emit(buf, "%d\n", atomic_read(&zc->load));
 }
 
 static DEVICE_ATTR_RO(load);
@@ -89,10 +89,7 @@ struct cca_pvt_ext_crt_sec {
 #define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
 
 /**
- * Set up private key fields of a type6 MEX message. The _pad variant
- * strips leading zeroes from the b_key.
- * Note that all numerics in the key token are big-endian,
- * while the entries in the key block header are little-endian.
+ * Set up private key fields of a type6 MEX message.
  *
  * @mex: pointer to user input data
  * @p: pointer to memory area for the key
@@ -111,10 +108,9 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
 		struct t6_keyblock_hdr t6_hdr;
 		struct cca_token_hdr pubhdr;
 		struct cca_public_sec pubsec;
-		char exponent[0];
+		char exponent[];
 	} __packed *key = p;
-	unsigned char *temp;
-	int i;
+	unsigned char *ptr;
 
 	/*
 	 * The inputdatalength was a selection criteria in the dispatching
@@ -131,37 +127,29 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
 	key->pubsec = static_pub_sec;
 
 	/* key parameter block */
-	temp = key->exponent;
-	if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+	ptr = key->exponent;
+	if (copy_from_user(ptr, mex->b_key, mex->inputdatalength))
 		return -EFAULT;
-	/* Strip leading zeroes from b_key. */
-	for (i = 0; i < mex->inputdatalength; i++)
-		if (temp[i])
-			break;
-	if (i >= mex->inputdatalength)
-		return -EINVAL;
-	memmove(temp, temp + i, mex->inputdatalength - i);
-	temp += mex->inputdatalength - i;
+	ptr += mex->inputdatalength;
 	/* modulus */
-	if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+	if (copy_from_user(ptr, mex->n_modulus, mex->inputdatalength))
 		return -EFAULT;
 
 	key->pubsec.modulus_bit_len = 8 * mex->inputdatalength;
 	key->pubsec.modulus_byte_len = mex->inputdatalength;
-	key->pubsec.exponent_len = mex->inputdatalength - i;
+	key->pubsec.exponent_len = mex->inputdatalength;
 	key->pubsec.section_length = sizeof(key->pubsec) +
-				     2 * mex->inputdatalength - i;
+				     2 * mex->inputdatalength;
 	key->pubhdr.token_length =
 		key->pubsec.section_length + sizeof(key->pubhdr);
 	key->t6_hdr.ulen = key->pubhdr.token_length + 4;
 	key->t6_hdr.blen = key->pubhdr.token_length + 6;
-	return sizeof(*key) + 2 * mex->inputdatalength - i;
+
+	return sizeof(*key) + 2 * mex->inputdatalength;
 }
 
 /**
  * Set up private key fields of a type6 CRT message.
- * Note that all numerics in the key token are big-endian,
- * while the entries in the key block header are little-endian.
  *
  * @mex: pointer to user input data
  * @p: pointer to memory area for the key
@@ -180,7 +168,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
 		struct t6_keyblock_hdr t6_hdr;
 		struct cca_token_hdr token;
 		struct cca_pvt_ext_crt_sec pvt;
-		char key_parts[0];
+		char key_parts[];
 	} __packed *key = p;
 	struct cca_public_sec *pub;
 	int short_len, long_len, pad_len, key_len, size;
@@ -242,6 +230,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
 	 * used.
 	 */
 	memcpy((char *)(pub + 1), pk_exponent, 3);
+
 	return size;
 }
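
The [0] to [] changes in this and the following files are the tree-wide conversion from zero-length arrays to C99 flexible array members: the member must come last, contributes nothing to sizeof(), and lets the compiler and fortified string routines reason about the trailing storage. A struct may carry only one flexible member, which is why stacked variable-length tails become documentation comments in the cca_gencipherkey hunks below. A small self-contained sketch of the allocation pattern:

/* Illustrative sketch of a flexible array member: the variable part is
 * allocated explicitly on top of the fixed header. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct keyblock {
	unsigned short len;
	char data[];		/* was: char data[0]; */
};

int main(void)
{
	size_t payload = 32;
	struct keyblock *kb = malloc(sizeof(*kb) + payload);

	if (!kb)
		return 1;
	kb->len = sizeof(*kb) + payload;	/* header + trailing bytes */
	memset(kb->data, 0, payload);
	printf("header %zu bytes, total %u bytes\n",
	       sizeof(*kb), (unsigned int)kb->len);
	free(kb);
	return 0;
}
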
@@ -450,18 +450,18 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
 		char rule_array[8];
 		struct lv1 {
 			u16 len;
-			u8 clrkey[0];
+			u8 clrkey[];
 		} lv1;
-		struct lv2 {
-			u16 len;
-			struct keyid {
-				u16 len;
-				u16 attr;
-				u8 data[SECKEYBLOBSIZE];
-			} keyid;
-		} lv2;
+		/* followed by struct lv2 */
 	} __packed * preqparm;
-	struct lv2 *plv2;
+	struct lv2 {
+		u16 len;
+		struct keyid {
+			u16 len;
+			u16 attr;
+			u8 data[SECKEYBLOBSIZE];
+		} keyid;
+	} __packed * plv2;
 	struct cmrepparm {
 		u8 subfunc_code[2];
 		u16 rule_array_len;
@@ -512,11 +512,11 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
 	}
 	preqparm->lv1.len = sizeof(struct lv1) + keysize;
 	memcpy(preqparm->lv1.clrkey, clrkey, keysize);
-	plv2 = (struct lv2 *)(((u8 *)&preqparm->lv2) + keysize);
+	plv2 = (struct lv2 *)(((u8 *)preqparm) + sizeof(*preqparm) + keysize);
 	plv2->len = sizeof(struct lv2);
 	plv2->keyid.len = sizeof(struct keyid);
 	plv2->keyid.attr = 0x30;
-	preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
+	preqcblk->req_parml = sizeof(*preqparm) + keysize + sizeof(*plv2);
 
 	/* fill xcrb struct */
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
@@ -761,22 +761,22 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
 			u16 key_name_2_len;
 			u16 user_data_1_len;
 			u16 user_data_2_len;
-			u8 key_name_1[0];
-			u8 key_name_2[0];
-			u8 user_data_1[0];
-			u8 user_data_2[0];
+			/* u8 key_name_1[]; */
+			/* u8 key_name_2[]; */
+			/* u8 user_data_1[]; */
+			/* u8 user_data_2[]; */
 		} vud;
 		struct {
 			u16 len;
 			struct {
 				u16 len;
 				u16 flag;
-				u8 kek_id_1[0];
+				/* u8 kek_id_1[]; */
 			} tlv1;
 			struct {
 				u16 len;
 				u16 flag;
-				u8 kek_id_2[0];
+				/* u8 kek_id_2[]; */
 			} tlv2;
 			struct {
 				u16 len;
@@ -786,17 +786,17 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
 			struct {
 				u16 len;
 				u16 flag;
-				u8 gen_key_id_1_label[0];
+				/* u8 gen_key_id_1_label[]; */
 			} tlv4;
 			struct {
 				u16 len;
 				u16 flag;
-				u8 gen_key_id_2[0];
+				/* u8 gen_key_id_2[]; */
 			} tlv5;
 			struct {
 				u16 len;
 				u16 flag;
-				u8 gen_key_id_2_label[0];
+				/* u8 gen_key_id_2_label[]; */
 			} tlv6;
 		} kb;
 	} __packed * preqparm;
@@ -811,7 +811,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
 			struct {
 				u16 len;
 				u16 flag;
-				u8 gen_key[0]; /* 120-136 bytes */
+				u8 gen_key[]; /* 120-136 bytes */
 			} tlv1;
 		} kb;
 	} __packed * prepparm;
@@ -955,7 +955,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
 	struct rule_array_block {
 		u8 subfunc_code[2];
 		u16 rule_array_len;
-		char rule_array[0];
+		char rule_array[];
 	} __packed * preq_ra_block;
 	struct vud_block {
 		u16 len;
@@ -967,7 +967,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
 		struct {
 			u16 len;
 			u16 flag;	/* 0x0063 */
-			u8 clr_key[0];	/* clear key value bytes */
+			u8 clr_key[];	/* clear key value bytes */
 		} tlv2;
 	} __packed * preq_vud_block;
 	struct key_block {
@@ -975,7 +975,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
 		struct {
 			u16 len;
 			u16 flag;	/* 0x0030 */
-			u8 key_token[0];	/* key skeleton */
+			u8 key_token[];	/* key skeleton */
 		} tlv1;
 	} __packed * preq_key_block;
 	struct iprepparm {
@@ -989,7 +989,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
 		struct {
 			u16 len;
 			u16 flag;	/* 0x0030 */
-			u8 key_token[0];	/* key token */
+			u8 key_token[];	/* key token */
 		} tlv1;
 	} kb;
 } __packed * prepparm;
@@ -1201,7 +1201,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
 			u16 len;
 			u16 cca_key_token_len;
 			u16 cca_key_token_flags;
-			u8 cca_key_token[0]; // 64 or more
+			u8 cca_key_token[]; /* 64 or more */
 		} kb;
 	} __packed * preqparm;
 	struct aurepparm {
@@ -1370,7 +1370,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
 			u16 len;
 			u16 cca_key_token_len;
 			u16 cca_key_token_flags;
-			u8 cca_key_token[0];
+			u8 cca_key_token[];
 		} kb;
 	} __packed * preqparm;
 	struct aurepparm {
@@ -1387,17 +1387,15 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
 				u8 form;
 				u8 pad1[3];
 				u16 keylen;
-				u8 key[0]; /* the key (keylen bytes) */
-				u16 keyattrlen;
-				u8 keyattr[32];
-				u8 pad2[1];
-				u8 vptype;
-				u8 vp[32]; /* verification pattern */
+				u8 key[]; /* the key (keylen bytes) */
+				/* u16 keyattrlen; */
+				/* u8 keyattr[32]; */
+				/* u8 pad2[1]; */
+				/* u8 vptype; */
+				/* u8 vp[32]; verification pattern */
 			} ckb;
 		} vud;
 		struct {
 			u16 len;
 		} kb;
+		/* followed by a key block */
 	} __packed * prepparm;
 	int keylen = ((struct eccprivkeytoken *)key)->len;
@@ -1525,7 +1523,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
 	size_t parmbsize = sizeof(struct fqreqparm);
 	struct fqrepparm {
 		u8 subfunc_code[2];
-		u8 lvdata[0];
+		u8 lvdata[];
 	} __packed * prepparm;
 
 	/* get already prepared memory for 2 cprbs with param block each */
@@ -75,7 +75,7 @@ static ssize_t cca_serialnr_show(struct device *dev,
 	if (ap_domain_index >= 0)
 		cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
 
-	return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+	return sysfs_emit(buf, "%s\n", ci.serial);
 }
 
 static struct device_attribute dev_attr_cca_serialnr =
@@ -110,51 +110,46 @@ static ssize_t cca_mkvps_show(struct device *dev,
 		     &ci, zq->online);
 
 	if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
-		n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
-			      new_state[ci.new_aes_mk_state - '1'],
-			      ci.new_aes_mkvp);
+		n = sysfs_emit(buf, "AES NEW: %s 0x%016llx\n",
+			       new_state[ci.new_aes_mk_state - '1'],
+			       ci.new_aes_mkvp);
 	else
-		n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+		n = sysfs_emit(buf, "AES NEW: - -\n");
 
 	if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "AES CUR: %s 0x%016llx\n",
-			       cao_state[ci.cur_aes_mk_state - '1'],
-			       ci.cur_aes_mkvp);
+		n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n",
+				   cao_state[ci.cur_aes_mk_state - '1'],
+				   ci.cur_aes_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+		n += sysfs_emit_at(buf, n, "AES CUR: - -\n");
 
 	if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "AES OLD: %s 0x%016llx\n",
-			       cao_state[ci.old_aes_mk_state - '1'],
-			       ci.old_aes_mkvp);
+		n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n",
+				   cao_state[ci.old_aes_mk_state - '1'],
+				   ci.old_aes_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+		n += sysfs_emit_at(buf, n, "AES OLD: - -\n");
 
 	if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "APKA NEW: %s 0x%016llx\n",
-			       new_state[ci.new_apka_mk_state - '1'],
-			       ci.new_apka_mkvp);
+		n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n",
+				   new_state[ci.new_apka_mk_state - '1'],
+				   ci.new_apka_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+		n += sysfs_emit_at(buf, n, "APKA NEW: - -\n");
 
 	if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "APKA CUR: %s 0x%016llx\n",
-			       cao_state[ci.cur_apka_mk_state - '1'],
-			       ci.cur_apka_mkvp);
+		n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n",
+				   cao_state[ci.cur_apka_mk_state - '1'],
+				   ci.cur_apka_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+		n += sysfs_emit_at(buf, n, "APKA CUR: - -\n");
 
 	if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "APKA OLD: %s 0x%016llx\n",
-			       cao_state[ci.old_apka_mk_state - '1'],
-			       ci.old_apka_mkvp);
+		n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n",
+				   cao_state[ci.old_apka_mk_state - '1'],
+				   ci.old_apka_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+		n += sysfs_emit_at(buf, n, "APKA OLD: - -\n");
 
 	return n;
 }
@@ -181,7 +176,7 @@ static const struct attribute_group cca_queue_attr_grp = {
 static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
 {
 	struct ap_message ap_msg;
-	unsigned long long psmid;
+	unsigned long psmid;
 	unsigned int domain;
 	struct {
 		struct type86_hdr hdr;
@@ -203,21 +198,22 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
 	ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.msg)
 		return -ENOMEM;
+	ap_msg.bufsize = PAGE_SIZE;
 
 	rng_type6cprb_msgx(&ap_msg, 4, &domain);
 
 	msg = ap_msg.msg;
 	msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
 
-	rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.msg, ap_msg.len);
+	rc = ap_send(aq->qid, 0x0102030405060708UL, ap_msg.msg, ap_msg.len);
 	if (rc)
 		goto out_free;
 
 	/* Wait for the test message to complete. */
 	for (i = 0; i < 2 * HZ; i++) {
 		msleep(1000 / HZ);
-		rc = ap_recv(aq->qid, &psmid, ap_msg.msg, 4096);
-		if (rc == 0 && psmid == 0x0102030405060708ULL)
+		rc = ap_recv(aq->qid, &psmid, ap_msg.msg, ap_msg.bufsize);
+		if (rc == 0 && psmid == 0x0102030405060708UL)
 			break;
 	}
@@ -342,7 +338,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
 	zq->queue = aq;
 	zq->online = 1;
 	atomic_set(&zq->load, 0);
-	ap_rapq(aq->qid);
+	ap_rapq(aq->qid, 0);
 	rc = zcrypt_cex2c_rng_supported(aq);
 	if (rc < 0) {
 		zcrypt_queue_free(zq);
@@ -88,7 +88,7 @@ static ssize_t cca_serialnr_show(struct device *dev,
 	if (ap_domain_index >= 0)
 		cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
 
-	return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+	return sysfs_emit(buf, "%s\n", ci.serial);
 }
 
 static struct device_attribute dev_attr_cca_serialnr =
@@ -123,79 +123,70 @@ static ssize_t cca_mkvps_show(struct device *dev,
 		     &ci, zq->online);
 
 	if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
-		n += scnprintf(buf + n, PAGE_SIZE,
-			       "AES NEW: %s 0x%016llx\n",
-			       new_state[ci.new_aes_mk_state - '1'],
-			       ci.new_aes_mkvp);
+		n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n",
+				   new_state[ci.new_aes_mk_state - '1'],
+				   ci.new_aes_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE, "AES NEW: - -\n");
+		n += sysfs_emit_at(buf, n, "AES NEW: - -\n");
 
 	if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "AES CUR: %s 0x%016llx\n",
-			       cao_state[ci.cur_aes_mk_state - '1'],
-			       ci.cur_aes_mkvp);
+		n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n",
+				   cao_state[ci.cur_aes_mk_state - '1'],
+				   ci.cur_aes_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+		n += sysfs_emit_at(buf, n, "AES CUR: - -\n");
 
 	if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "AES OLD: %s 0x%016llx\n",
-			       cao_state[ci.old_aes_mk_state - '1'],
-			       ci.old_aes_mkvp);
+		n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n",
+				   cao_state[ci.old_aes_mk_state - '1'],
+				   ci.old_aes_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+		n += sysfs_emit_at(buf, n, "AES OLD: - -\n");
 
 	if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "APKA NEW: %s 0x%016llx\n",
-			       new_state[ci.new_apka_mk_state - '1'],
-			       ci.new_apka_mkvp);
+		n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n",
+				   new_state[ci.new_apka_mk_state - '1'],
+				   ci.new_apka_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+		n += sysfs_emit_at(buf, n, "APKA NEW: - -\n");
 
 	if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "APKA CUR: %s 0x%016llx\n",
-			       cao_state[ci.cur_apka_mk_state - '1'],
-			       ci.cur_apka_mkvp);
+		n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n",
+				   cao_state[ci.cur_apka_mk_state - '1'],
+				   ci.cur_apka_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+		n += sysfs_emit_at(buf, n, "APKA CUR: - -\n");
 
 	if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "APKA OLD: %s 0x%016llx\n",
-			       cao_state[ci.old_apka_mk_state - '1'],
-			       ci.old_apka_mkvp);
+		n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n",
+				   cao_state[ci.old_apka_mk_state - '1'],
+				   ci.old_apka_mkvp);
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+		n += sysfs_emit_at(buf, n, "APKA OLD: - -\n");
 
 	if (ci.new_asym_mk_state >= '1' && ci.new_asym_mk_state <= '3')
-		n += scnprintf(buf + n, PAGE_SIZE,
-			       "ASYM NEW: %s 0x%016llx%016llx\n",
-			       new_state[ci.new_asym_mk_state - '1'],
-			       *((u64 *)(ci.new_asym_mkvp)),
-			       *((u64 *)(ci.new_asym_mkvp + sizeof(u64))));
+		n += sysfs_emit_at(buf, n, "ASYM NEW: %s 0x%016llx%016llx\n",
+				   new_state[ci.new_asym_mk_state - '1'],
+				   *((u64 *)(ci.new_asym_mkvp)),
+				   *((u64 *)(ci.new_asym_mkvp + sizeof(u64))));
 	else
-		n += scnprintf(buf + n, PAGE_SIZE, "ASYM NEW: - -\n");
+		n += sysfs_emit_at(buf, n, "ASYM NEW: - -\n");
 
 	if (ci.cur_asym_mk_state >= '1' && ci.cur_asym_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "ASYM CUR: %s 0x%016llx%016llx\n",
-			       cao_state[ci.cur_asym_mk_state - '1'],
-			       *((u64 *)(ci.cur_asym_mkvp)),
-			       *((u64 *)(ci.cur_asym_mkvp + sizeof(u64))));
+		n += sysfs_emit_at(buf, n, "ASYM CUR: %s 0x%016llx%016llx\n",
+				   cao_state[ci.cur_asym_mk_state - '1'],
+				   *((u64 *)(ci.cur_asym_mkvp)),
+				   *((u64 *)(ci.cur_asym_mkvp + sizeof(u64))));
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM CUR: - -\n");
+		n += sysfs_emit_at(buf, n, "ASYM CUR: - -\n");
 
 	if (ci.old_asym_mk_state >= '1' && ci.old_asym_mk_state <= '2')
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "ASYM OLD: %s 0x%016llx%016llx\n",
-			       cao_state[ci.old_asym_mk_state - '1'],
-			       *((u64 *)(ci.old_asym_mkvp)),
-			       *((u64 *)(ci.old_asym_mkvp + sizeof(u64))));
+		n += sysfs_emit_at(buf, n, "ASYM OLD: %s 0x%016llx%016llx\n",
+				   cao_state[ci.old_asym_mk_state - '1'],
+				   *((u64 *)(ci.old_asym_mkvp)),
+				   *((u64 *)(ci.old_asym_mkvp + sizeof(u64))));
 	else
-		n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM OLD: - -\n");
+		n += sysfs_emit_at(buf, n, "ASYM OLD: - -\n");
 
 	return n;
 }
@@ -228,9 +219,9 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
 	ep11_get_card_info(ac->id, &ci, zc->online);
 
 	if (ci.API_ord_nr > 0)
-		return scnprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+		return sysfs_emit(buf, "%u\n", ci.API_ord_nr);
 	else
-		return scnprintf(buf, PAGE_SIZE, "\n");
+		return sysfs_emit(buf, "\n");
 }
 
 static struct device_attribute dev_attr_ep11_api_ordinalnr =
@@ -249,11 +240,11 @@ static ssize_t ep11_fw_version_show(struct device *dev,
 	ep11_get_card_info(ac->id, &ci, zc->online);
 
 	if (ci.FW_version > 0)
-		return scnprintf(buf, PAGE_SIZE, "%d.%d\n",
-				 (int)(ci.FW_version >> 8),
-				 (int)(ci.FW_version & 0xFF));
+		return sysfs_emit(buf, "%d.%d\n",
+				  (int)(ci.FW_version >> 8),
+				  (int)(ci.FW_version & 0xFF));
 	else
-		return scnprintf(buf, PAGE_SIZE, "\n");
+		return sysfs_emit(buf, "\n");
 }
 
 static struct device_attribute dev_attr_ep11_fw_version =
@@ -272,9 +263,9 @@ static ssize_t ep11_serialnr_show(struct device *dev,
 	ep11_get_card_info(ac->id, &ci, zc->online);
 
 	if (ci.serial[0])
-		return scnprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+		return sysfs_emit(buf, "%16.16s\n", ci.serial);
 	else
-		return scnprintf(buf, PAGE_SIZE, "\n");
+		return sysfs_emit(buf, "\n");
 }
 
 static struct device_attribute dev_attr_ep11_serialnr =
@@ -309,11 +300,11 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
 		if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
 			if (n > 0)
 				buf[n++] = ' ';
-			n += scnprintf(buf + n, PAGE_SIZE - n,
-				       "%s", ep11_op_modes[i].mode_txt);
+			n += sysfs_emit_at(buf, n, "%s",
+					   ep11_op_modes[i].mode_txt);
 		}
 	}
-	n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	n += sysfs_emit_at(buf, n, "\n");
 
 	return n;
 }
@@ -356,29 +347,29 @@ static ssize_t ep11_mkvps_show(struct device *dev,
 			     &di);
 
 	if (di.cur_wk_state == '0') {
-		n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
-			      cwk_state[di.cur_wk_state - '0']);
+		n = sysfs_emit(buf, "WK CUR: %s -\n",
+			       cwk_state[di.cur_wk_state - '0']);
 	} else if (di.cur_wk_state == '1') {
-		n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
-			      cwk_state[di.cur_wk_state - '0']);
+		n = sysfs_emit(buf, "WK CUR: %s 0x",
+			       cwk_state[di.cur_wk_state - '0']);
 		bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
 		n += 2 * sizeof(di.cur_wkvp);
-		n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+		n += sysfs_emit_at(buf, n, "\n");
 	} else {
-		n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+		n = sysfs_emit(buf, "WK CUR: - -\n");
 	}
 
 	if (di.new_wk_state == '0') {
-		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
-			       nwk_state[di.new_wk_state - '0']);
+		n += sysfs_emit_at(buf, n, "WK NEW: %s -\n",
+				   nwk_state[di.new_wk_state - '0']);
 	} else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
-		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
-			       nwk_state[di.new_wk_state - '0']);
+		n += sysfs_emit_at(buf, n, "WK NEW: %s 0x",
+				   nwk_state[di.new_wk_state - '0']);
 		bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
 		n += 2 * sizeof(di.new_wkvp);
-		n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+		n += sysfs_emit_at(buf, n, "\n");
 	} else {
-		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+		n += sysfs_emit_at(buf, n, "WK NEW: - -\n");
 	}
 
 	return n;
@@ -406,11 +397,11 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
 		if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
 			if (n > 0)
 				buf[n++] = ' ';
-			n += scnprintf(buf + n, PAGE_SIZE - n,
-				       "%s", ep11_op_modes[i].mode_txt);
+			n += sysfs_emit_at(buf, n, "%s",
+					   ep11_op_modes[i].mode_txt);
 		}
 	}
-	n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	n += sysfs_emit_at(buf, n, "\n");
 
 	return n;
 }
@@ -1275,7 +1275,7 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
 		u32 pkeybitsize;
 		u64 pkeysize;
 		u8 res2[8];
-		u8 pkey[0];
+		u8 pkey[];
 	} __packed * wki;
 	const u8 *key;
 	struct ep11kblob_header *hdr;
@@ -441,14 +441,17 @@ static void zcrypt_cex2a_receive(struct ap_queue *aq,
 	t80h = reply->msg;
 	if (t80h->type == TYPE80_RSP_CODE) {
 		len = t80h->len;
-		if (len > reply->bufsize || len > msg->bufsize) {
+		if (len > reply->bufsize || len > msg->bufsize ||
+		    len != reply->len) {
 			ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
 			msg->rc = -EMSGSIZE;
-		} else {
-			memcpy(msg->msg, reply->msg, len);
-			msg->len = len;
+			goto out;
 		}
+		memcpy(msg->msg, reply->msg, len);
+		msg->len = len;
 	} else {
 		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 		msg->len = sizeof(error_reply);
 	}
+out:
 	complete((struct completion *)msg->private);
@@ -476,7 +479,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
 	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_cex2a_receive;
-	ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+	ap_msg->psmid = (((unsigned long)current->pid) << 32) +
 			atomic_inc_return(&zcrypt_step);
 	ap_msg->private = &work;
 	rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
@@ -527,7 +530,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
 	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_cex2a_receive;
-	ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+	ap_msg->psmid = (((unsigned long)current->pid) << 32) +
 			atomic_inc_return(&zcrypt_step);
 	ap_msg->private = &work;
 	rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
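
Besides the psmid type change, the receive callback above now rejects any reply whose self-declared type-80 length disagrees with the number of bytes actually received (len != reply->len) before copying, and restructures the copy path around a goto out. A compact user-space sketch of that validate-before-copy shape, with illustrative field names and -90 standing in for -EMSGSIZE:

/* Illustrative only: reject a reply whose claimed length disagrees
 * with buffer capacities or with what was actually received. */
#include <stdio.h>
#include <string.h>

struct msg {
	size_t bufsize;		/* capacity of buf */
	size_t len;		/* bytes actually received / copied */
	int rc;
	char buf[128];
};

static void receive(struct msg *dst, const struct msg *reply, size_t claimed)
{
	if (claimed > reply->bufsize || claimed > dst->bufsize ||
	    claimed != reply->len) {	/* length mismatch */
		dst->rc = -90;		/* stand-in for -EMSGSIZE */
		goto out;
	}
	memcpy(dst->buf, reply->buf, claimed);
	dst->len = claimed;
	dst->rc = 0;
out:
	return;				/* complete() would run here */
}

int main(void)
{
	struct msg reply = { .bufsize = 128, .len = 16,
			     .buf = "0123456789abcdef" };
	struct msg dst = { .bufsize = 128 };

	receive(&dst, &reply, 16);
	printf("rc=%d len=%zu\n", dst.rc, dst.len);
	receive(&dst, &reply, 32);	/* claims 32 but only 16 arrived */
	printf("rc=%d\n", dst.rc);
	return 0;
}
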
@@ -208,7 +208,7 @@ static int icamex_msg_to_type6mex_msgx(struct zcrypt_queue *zq,
 		struct CPRBX cprbx;
 		struct function_and_rules_block fr;
 		unsigned short length;
-		char text[0];
+		char text[];
 	} __packed * msg = ap_msg->msg;
 	int size;
 
@@ -278,7 +278,7 @@ static int icacrt_msg_to_type6crt_msgx(struct zcrypt_queue *zq,
 		struct CPRBX cprbx;
 		struct function_and_rules_block fr;
 		unsigned short length;
-		char text[0];
+		char text[];
 	} __packed * msg = ap_msg->msg;
 	int size;
 
@@ -566,8 +566,8 @@ struct type86x_reply {
 	struct type86_fmt2_ext fmt2;
 	struct CPRBX cprbx;
 	unsigned char pad[4];	/* 4 byte function code/rules block ? */
-	unsigned short length;
-	char text[];
+	unsigned short length;	/* length of data including length field size */
+	char data[];
 } __packed;
 
 struct type86_ep11_reply {
@@ -581,45 +581,9 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
 			      char __user *outputdata,
 			      unsigned int outputdatalength)
 {
-	static unsigned char static_pad[] = {
-		0x00, 0x02,
-		0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD,
-		0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57,
-		0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B,
-		0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39,
-		0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5,
-		0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D,
-		0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB,
-		0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F,
-		0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9,
-		0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45,
-		0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9,
-		0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F,
-		0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD,
-		0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D,
-		0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD,
-		0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9,
-		0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B,
-		0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B,
-		0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B,
-		0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD,
-		0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7,
-		0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1,
-		0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3,
-		0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23,
-		0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55,
-		0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43,
-		0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F,
-		0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F,
-		0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5,
-		0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD,
-		0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
-		0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
-	};
 	struct type86x_reply *msg = reply->msg;
 	unsigned short service_rc, service_rs;
-	unsigned int reply_len, pad_len;
-	char *data;
+	unsigned int data_len;
 
 	service_rc = msg->cprbx.ccp_rtcode;
 	if (unlikely(service_rc != 0)) {
@@ -647,32 +611,12 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
 		ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
 		return -EAGAIN;
 	}
-	data = msg->text;
-	reply_len = msg->length - 2;
-	if (reply_len > outputdatalength)
-		return -EINVAL;
-	/*
-	 * For all encipher requests, the length of the ciphertext (reply_len)
-	 * will always equal the modulus length. For MEX decipher requests
-	 * the output needs to get padded. Minimum pad size is 10.
-	 *
-	 * Currently, the cases where padding will be added is for:
-	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
-	 *   ZERO-PAD and CRT is only supported for PKD requests)
-	 * - PCICC, always
-	 */
-	pad_len = outputdatalength - reply_len;
-	if (pad_len > 0) {
-		if (pad_len < 10)
-			return -EINVAL;
-		/* 'restore' padding left in the CEXXC card. */
-		if (copy_to_user(outputdata, static_pad, pad_len - 1))
-			return -EFAULT;
-		if (put_user(0, outputdata + pad_len - 1))
-			return -EFAULT;
-	}
+	data_len = msg->length - sizeof(msg->length);
+	if (data_len > outputdatalength)
+		return -EMSGSIZE;
+
 	/* Copy the crypto response to user space. */
-	if (copy_to_user(outputdata + pad_len, data, reply_len))
+	if (copy_to_user(outputdata, msg->data, data_len))
 		return -EFAULT;
 	return 0;
 }
@@ -926,8 +870,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
 		.type = TYPE82_RSP_CODE,
 		.reply_code = REP82_ERROR_MACHINE_FAILURE,
 	};
-	struct response_type *resp_type =
-		(struct response_type *)msg->private;
+	struct response_type *resp_type = msg->private;
 	struct type86x_reply *t86r;
 	int len;
 
@@ -939,28 +882,37 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
 	    t86r->cprbx.cprb_ver_id == 0x02) {
 		switch (resp_type->type) {
 		case CEXXC_RESPONSE_TYPE_ICA:
-			len = sizeof(struct type86x_reply) + t86r->length - 2;
-			if (len > reply->bufsize || len > msg->bufsize) {
+			len = sizeof(struct type86x_reply) + t86r->length;
+			if (len > reply->bufsize || len > msg->bufsize ||
+			    len != reply->len) {
 				ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
 				msg->rc = -EMSGSIZE;
-			} else {
-				memcpy(msg->msg, reply->msg, len);
-				msg->len = len;
+				goto out;
 			}
+			memcpy(msg->msg, reply->msg, len);
+			msg->len = len;
 			break;
 		case CEXXC_RESPONSE_TYPE_XCRB:
-			len = t86r->fmt2.offset2 + t86r->fmt2.count2;
-			if (len > reply->bufsize || len > msg->bufsize) {
+			if (t86r->fmt2.count2)
+				len = t86r->fmt2.offset2 + t86r->fmt2.count2;
+			else
+				len = t86r->fmt2.offset1 + t86r->fmt2.count1;
+			if (len > reply->bufsize || len > msg->bufsize ||
+			    len != reply->len) {
 				ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
 				msg->rc = -EMSGSIZE;
-			} else {
-				memcpy(msg->msg, reply->msg, len);
-				msg->len = len;
+				goto out;
 			}
+			memcpy(msg->msg, reply->msg, len);
+			msg->len = len;
 			break;
 		default:
 			memcpy(msg->msg, &error_reply, sizeof(error_reply));
 			msg->len = sizeof(error_reply);
 		}
 	} else {
 		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 		msg->len = sizeof(error_reply);
 	}
+out:
 	complete(&resp_type->work);
@@ -982,8 +934,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
 		.type = TYPE82_RSP_CODE,
 		.reply_code = REP82_ERROR_MACHINE_FAILURE,
 	};
-	struct response_type *resp_type =
-		(struct response_type *)msg->private;
+	struct response_type *resp_type = msg->private;
 	struct type86_ep11_reply *t86r;
 	int len;
 
@@ -996,18 +947,22 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
 		switch (resp_type->type) {
 		case CEXXC_RESPONSE_TYPE_EP11:
 			len = t86r->fmt2.offset1 + t86r->fmt2.count1;
-			if (len > reply->bufsize || len > msg->bufsize) {
+			if (len > reply->bufsize || len > msg->bufsize ||
+			    len != reply->len) {
 				ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
 				msg->rc = -EMSGSIZE;
-			} else {
-				memcpy(msg->msg, reply->msg, len);
-				msg->len = len;
+				goto out;
 			}
+			memcpy(msg->msg, reply->msg, len);
+			msg->len = len;
 			break;
 		default:
 			memcpy(msg->msg, &error_reply, sizeof(error_reply));
 			msg->len = sizeof(error_reply);
 		}
 	} else {
 		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 		msg->len = sizeof(error_reply);
 	}
+out:
 	complete(&resp_type->work);
@@ -1036,7 +991,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
 		return -ENOMEM;
 	ap_msg->bufsize = PAGE_SIZE;
 	ap_msg->receive = zcrypt_msgtype6_receive;
-	ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+	ap_msg->psmid = (((unsigned long)current->pid) << 32) +
 			atomic_inc_return(&zcrypt_step);
 	ap_msg->private = &resp_type;
 	rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex);
@@ -1086,7 +1041,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
 		return -ENOMEM;
 	ap_msg->bufsize = PAGE_SIZE;
 	ap_msg->receive = zcrypt_msgtype6_receive;
-	ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+	ap_msg->psmid = (((unsigned long)current->pid) << 32) +
 			atomic_inc_return(&zcrypt_step);
 	ap_msg->private = &resp_type;
 	rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt);
@@ -1137,7 +1092,7 @@ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb,
 	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive;
-	ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+	ap_msg->psmid = (((unsigned long)current->pid) << 32) +
 			atomic_inc_return(&zcrypt_step);
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
@@ -1157,7 +1112,7 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
 				      struct ap_message *ap_msg)
 {
 	int rc;
-	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+	struct response_type *rtype = ap_msg->private;
 	struct {
 		struct type6_hdr hdr;
 		struct CPRBX cprbx;
@@ -1218,7 +1173,7 @@ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb,
 	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive_ep11;
-	ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+	ap_msg->psmid = (((unsigned long)current->pid) << 32) +
 			atomic_inc_return(&zcrypt_step);
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
@@ -1240,7 +1195,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
 {
 	int rc;
 	unsigned int lfmt;
-	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+	struct response_type *rtype = ap_msg->private;
 	struct {
 		struct type6_hdr hdr;
 		struct ep11_cprb cprbx;
@@ -1328,7 +1283,7 @@ int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code,
 	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive;
-	ap_msg->psmid = (((unsigned long long)current->pid) << 32) +
+	ap_msg->psmid = (((unsigned long)current->pid) << 32) +
 			atomic_inc_return(&zcrypt_step);
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
@@ -1359,7 +1314,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
 		short int verb_length;
 		short int key_length;
 	} __packed * msg = ap_msg->msg;
-	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+	struct response_type *rtype = ap_msg->private;
 	int rc;
 
 	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
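
The repeated unsigned long long to unsigned long change for psmid throughout this file is safe because s390 is 64-bit only, so unsigned long is already 64 bits wide: the pid occupies the upper half and a per-request counter the lower half. A minimal sketch of that composition; make_psmid() and the plain counter stand in for the kernel's atomic zcrypt_step:

/* Illustrative only: compose a 64-bit message id from pid and counter.
 * Assumes an LP64 target, as on s390x. */
#include <assert.h>
#include <stdio.h>
#include <unistd.h>

static unsigned long step;	/* models atomic_inc_return(&zcrypt_step) */

static unsigned long make_psmid(void)
{
	assert(sizeof(unsigned long) == 8);	/* holds on s390x/LP64 */
	return (((unsigned long)getpid()) << 32) + ++step;
}

int main(void)
{
	unsigned long psmid = make_psmid();

	printf("pid %lu, seq %lu\n", psmid >> 32, psmid & 0xffffffffUL);
	return 0;
}
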
@@ -44,7 +44,7 @@ static ssize_t online_show(struct device *dev,
 	struct ap_queue *aq = to_ap_queue(dev);
 	int online = aq->config && zq->online ? 1 : 0;
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+	return sysfs_emit(buf, "%d\n", online);
 }
 
 static ssize_t online_store(struct device *dev,
@@ -84,7 +84,7 @@ static ssize_t load_show(struct device *dev,
 {
 	struct zcrypt_queue *zq = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+	return sysfs_emit(buf, "%d\n", atomic_read(&zq->load));
 }
 
 static DEVICE_ATTR_RO(load);
@@ -70,6 +70,18 @@ late_initcall(stackleak_sysctls_init);
 #define skip_erasing()	false
 #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
 
+#ifndef __stackleak_poison
+static __always_inline void __stackleak_poison(unsigned long erase_low,
+					       unsigned long erase_high,
+					       unsigned long poison)
+{
+	while (erase_low < erase_high) {
+		*(unsigned long *)erase_low = poison;
+		erase_low += sizeof(unsigned long);
+	}
+}
+#endif
+
 static __always_inline void __stackleak_erase(bool on_task_stack)
 {
 	const unsigned long task_stack_low = stackleak_task_low_bound(current);
@@ -101,10 +113,7 @@ static __always_inline void __stackleak_erase(bool on_task_stack)
 	else
 		erase_high = task_stack_high;
 
-	while (erase_low < erase_high) {
-		*(unsigned long *)erase_low = STACKLEAK_POISON;
-		erase_low += sizeof(unsigned long);
-	}
+	__stackleak_poison(erase_low, erase_high, STACKLEAK_POISON);
 
 	/* Reset the 'lowest_stack' value for the next syscall */
 	current->lowest_stack = task_stack_high;
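
The #ifndef __stackleak_poison guard is what lets an architecture supply its own, faster poison routine (on s390 an mvc-based one, per the commit message) while everyone else keeps the generic word-at-a-time loop. A stand-alone sketch of that compile-time override pattern, with illustrative names and an arbitrary poison value:

/* Illustrative only: a generic default is compiled only if the "arch"
 * has not already provided and #define'd its own implementation. */
#include <stdio.h>

/* An "arch" header would do something like this: */
#define __stackleak_poison __stackleak_poison
static inline void __stackleak_poison(unsigned long *low,
				      unsigned long *high,
				      unsigned long poison)
{
	/* pretend this is the fast arch-specific variant */
	for (; low < high; low++)
		*low = poison;
}

#ifndef __stackleak_poison
/* generic fallback, never built in this sketch because the macro above
 * is defined */
static inline void __stackleak_poison(unsigned long *low,
				      unsigned long *high,
				      unsigned long poison)
{
	while (low < high)
		*low++ = poison;
}
#endif

int main(void)
{
	unsigned long stack[8] = { 0 };

	__stackleak_poison(stack, stack + 8, 0xCAFEBABEUL);
	printf("stack[0] = 0x%lx\n", stack[0]);
	return 0;
}
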
@@ -502,7 +502,7 @@ config SECTION_MISMATCH_WARN_ONLY
 
 config DEBUG_FORCE_FUNCTION_ALIGN_64B
 	bool "Force all function address 64B aligned"
-	depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC)
+	depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC || S390)
 	select FUNCTION_ALIGNMENT_64B
 	help
 	  There are cases that a commit from one domain changes the function