s390 updates for the 4.20 merge window

Merge tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:

 - Improved access control for the zcrypt driver: multiple device nodes
   can now be created with different access control lists

 - Extend the pkey API to provide random protected keys; this is useful
   for encrypted swap devices with ephemeral protected keys

 - Add support for virtually mapped kernel stacks

 - Rework the early boot code: this moves the memory detection into the
   boot code that runs prior to decompression

 - Add KASAN support

 - Bug fixes and cleanups

* tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (83 commits)
  s390/pkey: move pckmo subfunction available checks away from module init
  s390/kasan: support preemptible kernel build
  s390/pkey: Load pkey kernel module automatically
  s390/perf: Return error when debug_register fails
  s390/sthyi: Fix machine name validity indication
  s390/zcrypt: fix broken zcrypt_send_cprb in-kernel api function
  s390/vmalloc: fix VMALLOC_START calculation
  s390/mem_detect: add missing include
  s390/dumpstack: print psw mask and address again
  s390/crypto: Enhance paes cipher to accept variable length key material
  s390/pkey: Introduce new API for transforming key blobs
  s390/pkey: Introduce new API for random protected key verification
  s390/pkey: Add sysfs attributes to emit secure key blobs
  s390/pkey: Add sysfs attributes to emit protected key blobs
  s390/pkey: Define protected key blob format
  s390/pkey: Introduce new API for random protected key generation
  s390/zcrypt: add ap_adapter_mask sysfs attribute
  s390/zcrypt: provide apfs failure code on type 86 error reply
  s390/zcrypt: zcrypt device driver cleanup
  s390/kasan: add support for mem= kernel parameter
  ...
Linus Torvalds 2018-10-23 11:14:47 +01:00
commit e2b623fbe6
121 changed files with 3712 additions and 1183 deletions

@ -56,6 +56,12 @@ config PCI_QUIRKS
config ARCH_SUPPORTS_UPROBES
def_bool y
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
default 0x18000000000000 if KASAN_S390_4_LEVEL_PAGING
default 0x30000000000
config S390
def_bool y
select ARCH_BINFMT_ELF_STATE
@ -120,11 +126,13 @@ config S390
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_VMAP_STACK
select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
@ -649,6 +657,7 @@ config PACK_STACK
config CHECK_STACK
def_bool y
depends on !VMAP_STACK
prompt "Detect kernel stack overflow"
help
This option enables the compiler option -mstack-guard and


@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
UTS_MACHINE := s390x
STACK_SIZE := 16384
STACK_SIZE := $(if $(CONFIG_KASAN),32768,16384)
CHECKFLAGS += -D__s390__ -D__s390x__
export LD_BFD


@ -137,6 +137,14 @@ static void appldata_work_fn(struct work_struct *work)
mutex_unlock(&appldata_ops_mutex);
}
static struct appldata_product_id appldata_id = {
.prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
.prod_fn = 0xD5D3, /* "NL" */
.version_nr = 0xF2F6, /* "26" */
.release_nr = 0xF0F1, /* "01" */
};
/*
* appldata_diag()
*
@ -145,17 +153,22 @@ static void appldata_work_fn(struct work_struct *work)
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
u16 length, char *mod_lvl)
{
struct appldata_product_id id = {
.prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
.prod_fn = 0xD5D3, /* "NL" */
.version_nr = 0xF2F6, /* "26" */
.release_nr = 0xF0F1, /* "01" */
};
struct appldata_parameter_list *parm_list;
struct appldata_product_id *id;
int rc;
id.record_nr = record_nr;
id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
return appldata_asm(&id, function, (void *) buffer, length);
parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
id = kmemdup(&appldata_id, sizeof(appldata_id), GFP_KERNEL);
rc = -ENOMEM;
if (parm_list && id) {
id->record_nr = record_nr;
id->mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
rc = appldata_asm(parm_list, id, function,
(void *) buffer, length);
}
kfree(id);
kfree(parm_list);
return rc;
}
/************************ timer, work, DIAG <END> ****************************/


@ -1,2 +1,3 @@
image
bzImage
section_cmp.*


@ -6,6 +6,7 @@
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
@ -27,15 +28,32 @@ endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o ebcdic.o sclp_early_core.o mem.o
targets := bzImage startup.a $(obj-y)
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o string.o ebcdic.o
obj-y += sclp_early_core.o mem.o ipl_vmparm.o cmdline.o ctype.o
targets := bzImage startup.a section_cmp.boot.data $(obj-y)
subdir- := compressed
OBJECTS := $(addprefix $(obj)/,$(obj-y))
$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
quiet_cmd_section_cmp = SECTCMP $*
define cmd_section_cmp
s1=`$(OBJDUMP) -t -j "$*" "$<" | sort | \
sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
s2=`$(OBJDUMP) -t -j "$*" "$(word 2,$^)" | sort | \
sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
if [ "$$s1" != "$$s2" ]; then \
echo "error: section $* differs between $< and $(word 2,$^)" >&2; \
exit 1; \
fi; \
touch $@
endef
$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data FORCE
$(call if_changed,objcopy)
$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
$(call if_changed,section_cmp)
$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@

arch/s390/boot/boot.h (new file, 11 lines)

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BOOT_BOOT_H
#define BOOT_BOOT_H
void startup_kernel(void);
void detect_memory(void);
void store_ipl_parmblock(void);
void setup_boot_command_line(void);
void setup_memory_end(void);
#endif /* BOOT_BOOT_H */

arch/s390/boot/cmdline.c (new file, 2 lines)

@ -0,0 +1,2 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../../lib/cmdline.c"


@ -8,14 +8,16 @@
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) piggy.o info.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h)
targets += info.bin $(obj-y)
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
OBJCOPYFLAGS :=
OBJECTS := $(addprefix $(obj)/,$(obj-y))
@ -23,23 +25,16 @@ LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
$(call if_changed,ld)
# extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p'
OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info
$(obj)/info.bin: vmlinux FORCE
$(call if_changed,objcopy)
quiet_cmd_sizes = GEN $@
cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info
$(obj)/info.o: $(obj)/info.bin FORCE
$(call if_changed,objcopy)
$(obj)/sizes.h: vmlinux
$(call if_changed,sizes)
AFLAGS_head.o += -I$(objtree)/$(obj)
$(obj)/head.o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(objtree)/$(obj)
$(obj)/misc.o: $(obj)/sizes.h
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux
OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
vmlinux.bin.all-y := $(obj)/vmlinux.bin
@ -64,10 +59,10 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
$(call if_changed,xzkern)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
$(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y)
$(call if_changed,ld)
OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
$(call if_changed,objcopy)
chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS))
chkbss := $(filter-out $(obj)/piggy.o $(obj)/info.o,$(OBJECTS))
chkbss-target := $(obj)/vmlinux.bin
include $(srctree)/arch/s390/scripts/Makefile.chkbss


@ -0,0 +1,85 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Definitions and wrapper functions for kernel decompressor
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/page.h>
#include "decompressor.h"
/*
* gzip declarations
*/
#define STATIC static
#define STATIC_RW_DATA static __section(.data)
#undef memset
#undef memcpy
#undef memmove
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))
/* Symbols defined by linker scripts */
extern char _end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#define HEAP_SIZE 0x400000
#else
#define HEAP_SIZE 0x10000
#endif
static unsigned long free_mem_ptr = (unsigned long) _end;
static unsigned long free_mem_end_ptr = (unsigned long) _end + HEAP_SIZE;
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif
#define decompress_offset ALIGN((unsigned long)_end + HEAP_SIZE, PAGE_SIZE)
unsigned long mem_safe_offset(void)
{
/*
 * Due to the 4MB HEAP_SIZE for bzip2,
 * 'decompress_offset + vmlinux.image_size' could be larger than the
 * kernel at its final position + its .bss, so take the larger of the two
*/
return max(decompress_offset + vmlinux.image_size,
vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size);
}
void *decompress_kernel(void)
{
void *output = (void *)decompress_offset;
__decompress(_compressed_start, _compressed_end - _compressed_start,
NULL, NULL, output, 0, NULL, error);
return output;
}


@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
#define BOOT_COMPRESSED_DECOMPRESSOR_H
#ifdef CONFIG_KERNEL_UNCOMPRESSED
static inline void *decompress_kernel(void) { return NULL; }
#else
void *decompress_kernel(void);
#endif
unsigned long mem_safe_offset(void);
void error(char *m);
struct vmlinux_info {
unsigned long default_lma;
void (*entry)(void);
unsigned long image_size; /* does not include .bss */
unsigned long bss_size; /* uncompressed image .bss size */
unsigned long bootdata_off;
unsigned long bootdata_size;
};
extern char _vmlinux_info[];
#define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
#endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */


@ -1,52 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Startup glue code to uncompress the kernel
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include "sizes.h"
__HEAD
ENTRY(startup_decompressor)
basr %r13,0 # get base
.LPG1:
# setup stack
lg %r15,.Lstack-.LPG1(%r13)
aghi %r15,-160
brasl %r14,decompress_kernel
# Set up registers for memory mover. We move the decompressed image to
# 0x100000, where startup_continue of the decompressed image is supposed
# to be.
lgr %r4,%r2
lg %r2,.Loffset-.LPG1(%r13)
lg %r3,.Lmvsize-.LPG1(%r13)
lgr %r5,%r3
# Move the memory mover someplace safe so it doesn't overwrite itself.
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
# When the memory mover is done we pass control to
# arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in
# the decompressed image.
lgr %r6,%r2
br %r1
mover:
mvcle %r2,%r4,0
jo mover
br %r6
mover_end:
.align 8
.Lstack:
.quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.Loffset:
.quad 0x100000
.Lmvsize:
.quad SZ__bss_start


@ -1,116 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Definitions and wrapper functions for kernel decompressor
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sizes.h"
/*
* gzip declarations
*/
#define STATIC static
#undef memset
#undef memcpy
#undef memmove
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))
/* Symbols defined by linker scripts */
extern char input_data[];
extern int input_len;
extern char _end[];
extern char _bss[], _ebss[];
static void error(char *m);
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#define HEAP_SIZE 0x400000
#else
#define HEAP_SIZE 0x10000
#endif
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif
static int puts(const char *s)
{
sclp_early_printk(s);
return 0;
}
static void error(char *x)
{
unsigned long long psw = 0x000a0000deadbeefULL;
puts("\n\n");
puts(x);
puts("\n\n -- System halted");
asm volatile("lpsw %0" : : "Q" (psw));
}
unsigned long decompress_kernel(void)
{
void *output, *kernel_end;
output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE);
kernel_end = output + SZ__bss_start;
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
* kernel image. This also prevents initrd corruption caused by
* bss clearing since kernel_end will always be located behind the
* current bss section..
*/
if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = (unsigned long) kernel_end;
}
#endif
/*
* Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
* initialized afterwards since they reside in bss.
*/
memset(_bss, 0, _ebss - _bss);
free_mem_ptr = (unsigned long) _end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
return (unsigned long) output;
}


@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm-generic/vmlinux.lds.h>
#include <asm/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
@ -8,9 +9,6 @@ ENTRY(startup)
SECTIONS
{
/* Be careful parts of head_64.S assume startup_32 is at
* address 0.
*/
. = 0;
.head.text : {
_head = . ;
@ -26,7 +24,7 @@ SECTIONS
.rodata : {
_rodata = . ;
*(.rodata) /* read-only data */
*(EXCLUDE_FILE (*piggy.o) .rodata.compressed)
*(.rodata.*)
_erodata = . ;
}
.data : {
@ -35,14 +33,28 @@ SECTIONS
*(.data.*)
_edata = . ;
}
startup_continue = 0x100000;
BOOT_DATA
/*
 * Uncompressed image info used by the decompressor; it should match
 * struct vmlinux_info. It comes from the .vmlinux.info section of the
 * uncompressed vmlinux in the form of info.o.
*/
. = ALIGN(8);
.vmlinux.info : {
_vmlinux_info = .;
*(.vmlinux.info)
}
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = 0x100000;
#else
. = ALIGN(8);
#endif
.rodata.compressed : {
*(.rodata.compressed)
_compressed_start = .;
*(.vmlinux.bin.compressed)
_compressed_end = .;
}
. = ALIGN(256);
.bss : {


@ -1,15 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
SECTIONS
{
.rodata.compressed : {
#ifndef CONFIG_KERNEL_UNCOMPRESSED
input_len = .;
LONG(input_data_end - input_data) input_data = .;
#endif
*(.data)
#ifndef CONFIG_KERNEL_UNCOMPRESSED
output_len = . - 4;
input_data_end = .;
#endif
}
}

arch/s390/boot/ctype.c (new file, 2 lines)

@ -0,0 +1,2 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../../lib/ctype.c"


@ -60,6 +60,9 @@ __HEAD
.long 0x02000690,0x60000050
.long 0x020006e0,0x20000050
.org 0x1a0
.quad 0,iplstart
.org 0x200
#
@ -308,16 +311,11 @@ ENTRY(startup_kdump)
spt 6f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-STACK_FRAME_OVERHEAD
brasl %r14,verify_facilities
#ifdef CONFIG_KERNEL_UNCOMPRESSED
jg startup_continue
#else
jg startup_decompressor
#endif
brasl %r14,startup_kernel
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.long 0x8000 + (1<<(PAGE_SHIFT+BOOT_STACK_ORDER)) - STACK_FRAME_OVERHEAD
.align 8
6: .long 0x7fffffff,0xffffffff

arch/s390/boot/ipl_parm.c (new file, 182 lines)

@ -0,0 +1,182 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/ctype.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
struct ipl_parameter_block __bootdata(early_ipl_block);
int __bootdata(early_ipl_block_valid);
unsigned long __bootdata(memory_end);
int __bootdata(memory_end_set);
int __bootdata(noexec_disabled);
static inline int __diag308(unsigned long subcode, void *addr)
{
register unsigned long _addr asm("0") = (unsigned long)addr;
register unsigned long _rc asm("1") = 0;
unsigned long reg1, reg2;
psw_t old = S390_lowcore.program_new_psw;
asm volatile(
" epsw %0,%1\n"
" st %0,%[psw_pgm]\n"
" st %1,%[psw_pgm]+4\n"
" larl %0,1f\n"
" stg %0,%[psw_pgm]+8\n"
" diag %[addr],%[subcode],0x308\n"
"1: nopr %%r7\n"
: "=&d" (reg1), "=&a" (reg2),
[psw_pgm] "=Q" (S390_lowcore.program_new_psw),
[addr] "+d" (_addr), "+d" (_rc)
: [subcode] "d" (subcode)
: "cc", "memory");
S390_lowcore.program_new_psw = old;
return _rc;
}
void store_ipl_parmblock(void)
{
int rc;
rc = __diag308(DIAG308_STORE, &early_ipl_block);
if (rc == DIAG308_RC_OK &&
early_ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
early_ipl_block_valid = 1;
}
static size_t scpdata_length(const char *buf, size_t count)
{
while (count) {
if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
break;
count--;
}
return count;
}
static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
size_t count;
size_t i;
int has_lowercase;
count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
ipb->ipl_info.fcp.scp_data_len));
if (!count)
goto out;
has_lowercase = 0;
for (i = 0; i < count; i++) {
if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
count = 0;
goto out;
}
if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
has_lowercase = 1;
}
if (has_lowercase)
memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
else
for (i = 0; i < count; i++)
dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
out:
dest[count] = '\0';
return count;
}
static void append_ipl_block_parm(void)
{
char *parm, *delim;
size_t len, rc = 0;
len = strlen(early_command_line);
delim = early_command_line + len; /* '\0' character position */
parm = early_command_line + len + 1; /* append right after '\0' */
switch (early_ipl_block.hdr.pbt) {
case DIAG308_IPL_TYPE_CCW:
rc = ipl_block_get_ascii_vmparm(
parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
break;
case DIAG308_IPL_TYPE_FCP:
rc = ipl_block_get_ascii_scpdata(
parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
break;
}
if (rc) {
if (*parm == '=')
memmove(early_command_line, parm + 1, rc);
else
*delim = ' '; /* replace '\0' with space */
}
}
static inline int has_ebcdic_char(const char *str)
{
int i;
for (i = 0; str[i]; i++)
if (str[i] & 0x80)
return 1;
return 0;
}
void setup_boot_command_line(void)
{
COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
/* convert arch command line to ascii if necessary */
if (has_ebcdic_char(COMMAND_LINE))
EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
/* copy arch command line */
strcpy(early_command_line, strim(COMMAND_LINE));
/* append IPL PARM data to the boot command line */
if (early_ipl_block_valid)
append_ipl_block_parm();
}
static char command_line_buf[COMMAND_LINE_SIZE] __section(.data);
static void parse_mem_opt(void)
{
char *param, *val;
bool enabled;
char *args;
int rc;
args = strcpy(command_line_buf, early_command_line);
while (*args) {
args = next_arg(args, &param, &val);
if (!strcmp(param, "mem")) {
memory_end = memparse(val, NULL);
memory_end_set = 1;
}
if (!strcmp(param, "noexec")) {
rc = kstrtobool(val, &enabled);
if (!rc && !enabled)
noexec_disabled = 1;
}
}
}
void setup_memory_end(void)
{
parse_mem_opt();
#ifdef CONFIG_CRASH_DUMP
if (!OLDMEM_BASE && early_ipl_block_valid &&
early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP &&
early_ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) {
if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
memory_end_set = 1;
}
#endif
}


@ -0,0 +1,2 @@
// SPDX-License-Identifier: GPL-2.0
#include "../kernel/ipl_vmparm.c"

arch/s390/boot/mem_detect.c (new file, 182 lines)

@ -0,0 +1,182 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/sparsemem.h>
#include "compressed/decompressor.h"
#include "boot.h"
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);
/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX \
(256 * (1020 / 2) * sizeof(struct mem_detect_block))
/*
 * To avoid corrupting old kernel memory during dump, find the lowest memory
 * chunk possible, either right after the kernel end (decompressed kernel) or
 * after the initrd (if it is present and there is no hole between the kernel
 * end and the initrd)
*/
static void *mem_detect_alloc_extended(void)
{
unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
INITRD_START < offset + ENTRIES_EXTENDED_MAX)
offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
return (void *)offset;
}
static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
if (n < MEM_INLINED_ENTRIES)
return &mem_detect.entries[n];
if (unlikely(!mem_detect.entries_extended))
mem_detect.entries_extended = mem_detect_alloc_extended();
return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}
/*
 * Sequential calls to add_mem_detect_block() with adjacent memory areas
 * are merged together into a single memory block.
*/
void add_mem_detect_block(u64 start, u64 end)
{
struct mem_detect_block *block;
if (mem_detect.count) {
block = __get_mem_detect_block_ptr(mem_detect.count - 1);
if (block->end == start) {
block->end = end;
return;
}
}
block = __get_mem_detect_block_ptr(mem_detect.count);
block->start = start;
block->end = end;
mem_detect.count++;
}
static unsigned long get_mem_detect_end(void)
{
if (mem_detect.count)
return __get_mem_detect_block_ptr(mem_detect.count - 1)->end;
return 0;
}
static int __diag260(unsigned long rx1, unsigned long rx2)
{
register unsigned long _rx1 asm("2") = rx1;
register unsigned long _rx2 asm("3") = rx2;
register unsigned long _ry asm("4") = 0x10; /* storage configuration */
int rc = -1; /* fail */
unsigned long reg1, reg2;
psw_t old = S390_lowcore.program_new_psw;
asm volatile(
" epsw %0,%1\n"
" st %0,%[psw_pgm]\n"
" st %1,%[psw_pgm]+4\n"
" larl %0,1f\n"
" stg %0,%[psw_pgm]+8\n"
" diag %[rx],%[ry],0x260\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
"1:\n"
: "=&d" (reg1), "=&a" (reg2),
[psw_pgm] "=Q" (S390_lowcore.program_new_psw),
[rc] "+&d" (rc), [ry] "+d" (_ry)
: [rx] "d" (_rx1), "d" (_rx2)
: "cc", "memory");
S390_lowcore.program_new_psw = old;
return rc == 0 ? _ry : -1;
}
static int diag260(void)
{
int rc, i;
struct {
unsigned long start;
unsigned long end;
} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */
memset(storage_extents, 0, sizeof(storage_extents));
rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
if (rc == -1)
return -1;
for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
return 0;
}
static int tprot(unsigned long addr)
{
unsigned long pgm_addr;
int rc = -EFAULT;
psw_t old = S390_lowcore.program_new_psw;
S390_lowcore.program_new_psw.mask = __extract_psw();
asm volatile(
" larl %[pgm_addr],1f\n"
" stg %[pgm_addr],%[psw_pgm_addr]\n"
" tprot 0(%[addr]),0\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
"1:\n"
: [pgm_addr] "=&d"(pgm_addr),
[psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
[rc] "+&d"(rc)
: [addr] "a"(addr)
: "cc", "memory");
S390_lowcore.program_new_psw = old;
return rc;
}
static void search_mem_end(void)
{
unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
unsigned long offset = 0;
unsigned long pivot;
while (range > 1) {
range >>= 1;
pivot = offset + range;
if (!tprot(pivot << 20))
offset = pivot;
}
add_mem_detect_block(0, (offset + 1) << 20);
}
void detect_memory(void)
{
sclp_early_get_memsize(&max_physmem_end);
if (!sclp_early_read_storage_info()) {
mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
return;
}
if (!diag260()) {
mem_detect.info_source = MEM_DETECT_DIAG260;
return;
}
if (max_physmem_end) {
add_mem_detect_block(0, max_physmem_end);
mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
return;
}
search_mem_end();
mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
max_physmem_end = get_mem_detect_end();
}

arch/s390/boot/startup.c (new file, 64 lines)

@ -0,0 +1,64 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
#include "boot.h"
extern char __boot_data_start[], __boot_data_end[];
void error(char *x)
{
sclp_early_printk("\n\n");
sclp_early_printk(x);
sclp_early_printk("\n\n -- System halted");
disabled_wait(0xdeadbeef);
}
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
static void rescue_initrd(void)
{
unsigned long min_initrd_addr;
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
if (!INITRD_START || !INITRD_SIZE)
return;
min_initrd_addr = mem_safe_offset();
if (min_initrd_addr <= INITRD_START)
return;
memmove((void *)min_initrd_addr, (void *)INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
}
static void copy_bootdata(void)
{
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
error(".boot.data section size mismatch");
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
}
void startup_kernel(void)
{
void *img;
rescue_initrd();
sclp_early_read_info();
store_ipl_parmblock();
setup_boot_command_line();
setup_memory_end();
detect_memory();
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel();
memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
}
copy_bootdata();
vmlinux.entry();
}

arch/s390/boot/string.c (new file, 138 lines)

@ -0,0 +1,138 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include "../lib/string.c"
int strncmp(const char *cs, const char *ct, size_t count)
{
unsigned char c1, c2;
while (count) {
c1 = *cs++;
c2 = *ct++;
if (c1 != c2)
return c1 < c2 ? -1 : 1;
if (!c1)
break;
count--;
}
return 0;
}
char *skip_spaces(const char *str)
{
while (isspace(*str))
++str;
return (char *)str;
}
char *strim(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
return skip_spaces(s);
}
/* Works only for digits and letters, but small and fast */
#define TOLOWER(x) ((x) | 0x20)
static unsigned int simple_guess_base(const char *cp)
{
if (cp[0] == '0') {
if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
return 16;
else
return 8;
} else {
return 10;
}
}
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
unsigned long long simple_strtoull(const char *cp, char **endp,
unsigned int base)
{
unsigned long long result = 0;
if (!base)
base = simple_guess_base(cp);
if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
cp += 2;
while (isxdigit(*cp)) {
unsigned int value;
value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
if (value >= base)
break;
result = result * base + value;
cp++;
}
if (endp)
*endp = (char *)cp;
return result;
}
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
if (*cp == '-')
return -simple_strtoull(cp + 1, endp, base);
return simple_strtoull(cp, endp, base);
}
int kstrtobool(const char *s, bool *res)
{
if (!s)
return -EINVAL;
switch (s[0]) {
case 'y':
case 'Y':
case '1':
*res = true;
return 0;
case 'n':
case 'N':
case '0':
*res = false;
return 0;
case 'o':
case 'O':
switch (s[1]) {
case 'n':
case 'N':
*res = true;
return 0;
case 'f':
case 'F':
*res = false;
return 0;
default:
break;
}
default:
break;
}
return -EINVAL;
}


@ -30,26 +30,31 @@ static DEFINE_SPINLOCK(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
struct key_blob {
__u8 key[MAXKEYBLOBSIZE];
unsigned int keylen;
};
struct s390_paes_ctx {
struct pkey_seckey sk;
struct key_blob kb;
struct pkey_protkey pk;
unsigned long fc;
};
struct s390_pxts_ctx {
struct pkey_seckey sk[2];
struct key_blob kb[2];
struct pkey_protkey pk[2];
unsigned long fc;
};
static inline int __paes_convert_key(struct pkey_seckey *sk,
static inline int __paes_convert_key(struct key_blob *kb,
struct pkey_protkey *pk)
{
int i, ret;
/* try three times in case of failure */
for (i = 0; i < 3; i++) {
ret = pkey_skey2pkey(sk, pk);
ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
if (ret == 0)
break;
}
@ -61,7 +66,7 @@ static int __paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
if (__paes_convert_key(&ctx->kb, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@ -80,10 +85,8 @@ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
if (key_len != SECKEYBLOBSIZE)
return -EINVAL;
memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
memcpy(ctx->kb.key, in_key, key_len);
ctx->kb.keylen = key_len;
if (__paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
@ -147,8 +150,8 @@ static struct crypto_alg ecb_paes_alg = {
.cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.min_keysize = MINKEYBLOBSIZE,
.max_keysize = MAXKEYBLOBSIZE,
.setkey = ecb_paes_set_key,
.encrypt = ecb_paes_encrypt,
.decrypt = ecb_paes_decrypt,
@ -160,7 +163,7 @@ static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
if (__paes_convert_key(&ctx->kb, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@ -179,7 +182,8 @@ static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
memcpy(ctx->kb.key, in_key, key_len);
ctx->kb.keylen = key_len;
if (__cbc_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
@ -250,8 +254,8 @@ static struct crypto_alg cbc_paes_alg = {
.cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.min_keysize = MINKEYBLOBSIZE,
.max_keysize = MAXKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = cbc_paes_set_key,
.encrypt = cbc_paes_encrypt,
@ -264,8 +268,8 @@ static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) ||
__paes_convert_key(&ctx->sk[1], &ctx->pk[1]))
if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
__paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
return -EINVAL;
if (ctx->pk[0].type != ctx->pk[1].type)
@ -287,10 +291,16 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len;
unsigned int ckey_len, keytok_len;
memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE);
memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE);
if (key_len % 2)
return -EINVAL;
keytok_len = key_len / 2;
memcpy(ctx->kb[0].key, in_key, keytok_len);
ctx->kb[0].keylen = keytok_len;
memcpy(ctx->kb[1].key, in_key + keytok_len, keytok_len);
ctx->kb[1].keylen = keytok_len;
if (__xts_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
@ -386,8 +396,8 @@ static struct crypto_alg xts_paes_alg = {
.cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = 2 * SECKEYBLOBSIZE,
.max_keysize = 2 * SECKEYBLOBSIZE,
.min_keysize = 2 * MINKEYBLOBSIZE,
.max_keysize = 2 * MAXKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_paes_set_key,
.encrypt = xts_paes_encrypt,
@ -400,7 +410,7 @@ static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
if (__paes_convert_key(&ctx->sk, &ctx->pk))
if (__paes_convert_key(&ctx->kb, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@ -420,7 +430,8 @@ static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
memcpy(ctx->sk.seckey, in_key, key_len);
memcpy(ctx->kb.key, in_key, key_len);
ctx->kb.keylen = key_len;
if (__ctr_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
@ -532,8 +543,8 @@ static struct crypto_alg ctr_paes_alg = {
.cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = SECKEYBLOBSIZE,
.max_keysize = SECKEYBLOBSIZE,
.min_keysize = MINKEYBLOBSIZE,
.max_keysize = MAXKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ctr_paes_set_key,
.encrypt = ctr_paes_encrypt,


@ -232,6 +232,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
CONFIG_ZCRYPT_MULTIDEVNODES=y
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m


@ -68,40 +68,44 @@ static int hypfs_sprp_create(void **data_ptr, void **free_ptr, size_t *size)
static int __hypfs_sprp_ioctl(void __user *user_area)
{
struct hypfs_diag304 diag304;
struct hypfs_diag304 *diag304;
unsigned long cmd;
void __user *udata;
void *data;
int rc;
if (copy_from_user(&diag304, user_area, sizeof(diag304)))
return -EFAULT;
if ((diag304.args[0] >> 8) != 0 || diag304.args[1] > DIAG304_CMD_MAX)
return -EINVAL;
rc = -ENOMEM;
data = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!data)
return -ENOMEM;
diag304 = kzalloc(sizeof(*diag304), GFP_KERNEL);
if (!data || !diag304)
goto out;
udata = (void __user *)(unsigned long) diag304.data;
if (diag304.args[1] == DIAG304_SET_WEIGHTS ||
diag304.args[1] == DIAG304_SET_CAPPING)
if (copy_from_user(data, udata, PAGE_SIZE)) {
rc = -EFAULT;
rc = -EFAULT;
if (copy_from_user(diag304, user_area, sizeof(*diag304)))
goto out;
rc = -EINVAL;
if ((diag304->args[0] >> 8) != 0 || diag304->args[1] > DIAG304_CMD_MAX)
goto out;
rc = -EFAULT;
udata = (void __user *)(unsigned long) diag304->data;
if (diag304->args[1] == DIAG304_SET_WEIGHTS ||
diag304->args[1] == DIAG304_SET_CAPPING)
if (copy_from_user(data, udata, PAGE_SIZE))
goto out;
}
cmd = *(unsigned long *) &diag304.args[0];
diag304.rc = hypfs_sprp_diag304(data, cmd);
cmd = *(unsigned long *) &diag304->args[0];
diag304->rc = hypfs_sprp_diag304(data, cmd);
if (diag304.args[1] == DIAG304_QUERY_PRP)
if (diag304->args[1] == DIAG304_QUERY_PRP)
if (copy_to_user(udata, data, PAGE_SIZE)) {
rc = -EFAULT;
goto out;
}
rc = copy_to_user(user_area, &diag304, sizeof(diag304)) ? -EFAULT : 0;
rc = copy_to_user(user_area, diag304, sizeof(*diag304)) ? -EFAULT : 0;
out:
kfree(diag304);
free_page((unsigned long) data);
return rc;
}


@ -40,26 +40,27 @@ struct appldata_product_id {
u16 mod_lvl; /* modification level */
} __attribute__ ((packed));
static inline int appldata_asm(struct appldata_product_id *id,
static inline int appldata_asm(struct appldata_parameter_list *parm_list,
struct appldata_product_id *id,
unsigned short fn, void *buffer,
unsigned short length)
{
struct appldata_parameter_list parm_list;
int ry;
if (!MACHINE_IS_VM)
return -EOPNOTSUPP;
parm_list.diag = 0xdc;
parm_list.function = fn;
parm_list.parlist_length = sizeof(parm_list);
parm_list.buffer_length = length;
parm_list.product_id_addr = (unsigned long) id;
parm_list.buffer_addr = virt_to_phys(buffer);
parm_list->diag = 0xdc;
parm_list->function = fn;
parm_list->parlist_length = sizeof(*parm_list);
parm_list->buffer_length = length;
parm_list->product_id_addr = (unsigned long) id;
parm_list->buffer_addr = virt_to_phys(buffer);
diag_stat_inc(DIAG_STAT_X0DC);
asm volatile(
" diag %1,%0,0xdc"
: "=d" (ry)
: "d" (&parm_list), "m" (parm_list), "m" (*id)
: "d" (parm_list), "m" (*parm_list), "m" (*id)
: "cc");
return ry;
}


@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_BOOT_DATA_H
#define _ASM_S390_BOOT_DATA_H
#include <asm/setup.h>
#include <asm/ipl.h>
extern char early_command_line[COMMAND_LINE_SIZE];
extern struct ipl_parameter_block early_ipl_block;
extern int early_ipl_block_valid;
#endif /* _ASM_S390_BOOT_DATA_H */


@ -64,6 +64,8 @@ extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
char *bus_id);
extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);


@ -64,11 +64,10 @@ static inline int test_facility(unsigned long nr)
* @stfle_fac_list: array where facility list can be stored
* @size: size of passed in array in double words
*/
static inline void stfle(u64 *stfle_fac_list, int size)
static inline void __stfle(u64 *stfle_fac_list, int size)
{
unsigned long nr;
preempt_disable();
asm volatile(
" stfl 0(0)\n"
: "=m" (S390_lowcore.stfl_fac_list));
@ -85,6 +84,12 @@ static inline void stfle(u64 *stfle_fac_list, int size)
nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
}
memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
}
static inline void stfle(u64 *stfle_fac_list, int size)
{
preempt_disable();
__stfle(stfle_fac_list, size);
preempt_enable();
}


@ -89,8 +89,8 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void s390_reset_system(void);
extern void ipl_store_parameters(void);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
extern size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb);
enum ipl_type {
IPL_TYPE_UNKNOWN = 1,


@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
#include <asm/pgtable.h>
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#ifdef CONFIG_KASAN_S390_4_LEVEL_PAGING
#define KASAN_SHADOW_SIZE \
(_AC(1, UL) << (_REGION1_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#else
#define KASAN_SHADOW_SIZE \
(_AC(1, UL) << (_REGION2_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
#endif
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
extern void kasan_early_init(void);
extern void kasan_copy_shadow(pgd_t *dst);
extern void kasan_free_early_identity(void);
#else
static inline void kasan_early_init(void) { }
static inline void kasan_copy_shadow(pgd_t *dst) { }
static inline void kasan_free_early_identity(void) { }
#endif
#endif


@ -102,9 +102,9 @@ struct lowcore {
__u64 current_task; /* 0x0338 */
__u64 kernel_stack; /* 0x0340 */
/* Interrupt, panic and restart stack. */
/* Interrupt, DAT-off and restart stack. */
__u64 async_stack; /* 0x0348 */
__u64 panic_stack; /* 0x0350 */
__u64 nodat_stack; /* 0x0350 */
__u64 restart_stack; /* 0x0358 */
/* Restart function and parameter. */


@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H
#include <linux/types.h>
enum mem_info_source {
MEM_DETECT_NONE = 0,
MEM_DETECT_SCLP_STOR_INFO,
MEM_DETECT_DIAG260,
MEM_DETECT_SCLP_READ_INFO,
MEM_DETECT_BIN_SEARCH
};
struct mem_detect_block {
u64 start;
u64 end;
};
/*
 * The storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice, only storage element ids 0 and 1 are used.
 * According to the architecture, one storage element can have as many as
 * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
 * If more mem_detect_blocks are required, a block of memory from an already
 * known mem_detect_block is taken (entries_extended points to it).
*/
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
struct mem_detect_info {
u32 count;
u8 info_source;
struct mem_detect_block entries[MEM_INLINED_ENTRIES];
struct mem_detect_block *entries_extended;
};
extern struct mem_detect_info mem_detect;
void add_mem_detect_block(u64 start, u64 end);
static inline int __get_mem_detect_block(u32 n, unsigned long *start,
unsigned long *end)
{
if (n >= mem_detect.count) {
*start = 0;
*end = 0;
return -1;
}
if (n < MEM_INLINED_ENTRIES) {
*start = (unsigned long)mem_detect.entries[n].start;
*end = (unsigned long)mem_detect.entries[n].end;
} else {
*start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
*end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
}
return 0;
}
/**
* for_each_mem_detect_block - early online memory range iterator
* @i: an integer used as loop variable
* @p_start: ptr to unsigned long for start address of the range
* @p_end: ptr to unsigned long for end address of the range
*
* Walks over detected online memory ranges.
*/
#define for_each_mem_detect_block(i, p_start, p_end) \
for (i = 0, __get_mem_detect_block(i, p_start, p_end); \
i < mem_detect.count; \
i++, __get_mem_detect_block(i, p_start, p_end))
static inline void get_mem_detect_reserved(unsigned long *start,
unsigned long *size)
{
*start = (unsigned long)mem_detect.entries_extended;
if (mem_detect.count > MEM_INLINED_ENTRIES)
*size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
else
*size = 0;
}
#endif
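For context, a minimal sketch of how a consumer could walk the detected ranges with the for_each_mem_detect_block() iterator defined above; the helper and its call site are hypothetical, not part of this series:

/* Hypothetical consumer of the iterator above; illustrative only. */
#include <linux/init.h>
#include <linux/printk.h>
#include <asm/mem_detect.h>

static void __init print_detected_memory(void)
{
	unsigned long start, end;
	int i;

	/* Visits the 255 inlined entries first, then entries_extended. */
	for_each_mem_detect_block(i, &start, &end)
		pr_info("memory block %d: 0x%lx-0x%lx\n", i, start, end);
}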


@ -32,6 +32,8 @@ typedef struct {
unsigned int uses_cmm:1;
/* The gmaps associated with this context are allowed to use huge pages. */
unsigned int allow_gmap_hpage_1m:1;
/* The mmu context is for compat task */
unsigned int compat_mm:1;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \


@ -25,6 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
atomic_set(&mm->context.flush_count, 0);
mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
mm->context.compat_mm = 0;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste ||
test_thread_flag(TIF_PGSTE) ||


@ -161,6 +161,7 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))


@ -341,6 +341,8 @@ static inline int is_module_addr(void *addr)
#define PTRS_PER_P4D _CRST_ENTRIES
#define PTRS_PER_PGD _CRST_ENTRIES
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
/*
* Segment table and region3 table entry encoding
* (R = read-only, I = invalid, y = young bit):
@ -466,6 +468,12 @@ static inline int is_module_addr(void *addr)
_SEGMENT_ENTRY_YOUNG | \
_SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
_SEGMENT_ENTRY_LARGE | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE | \
_SEGMENT_ENTRY_YOUNG | \
_SEGMENT_ENTRY_DIRTY)
/*
* Region3 entry (large page) protection definitions.
@ -599,6 +607,14 @@ static inline int pgd_bad(pgd_t pgd)
return (pgd_val(pgd) & mask) != 0;
}
static inline unsigned long pgd_pfn(pgd_t pgd)
{
unsigned long origin_mask;
origin_mask = _REGION_ENTRY_ORIGIN;
return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}
static inline int p4d_folded(p4d_t p4d)
{
return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
@ -1171,6 +1187,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@ -1210,7 +1227,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(pud) pfn_to_page(p4d_pfn(p4d))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))


@ -109,4 +109,30 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
u16 *pcardnr, u16 *pdomain,
u16 *pkeysize, u32 *pattributes);
/*
* In-kernel API: Generate (AES) random protected key.
* @param keytype one of the PKEY_KEYTYPE values
* @param protkey pointer to buffer receiving the protected key
* @return 0 on success, negative errno value on failure
*/
int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey);
/*
* In-kernel API: Verify an (AES) protected key.
* @param protkey pointer to buffer containing the protected key to verify
* @return 0 on success, negative errno value on failure. In case the protected
* key is not valid -EKEYREJECTED is returned
*/
int pkey_verifyprotkey(const struct pkey_protkey *protkey);
/*
* In-kernel API: Transform a key blob (of any type) into a protected key.
* @param key pointer to a buffer containing the key blob
* @param keylen size of the key blob in bytes
* @param protkey pointer to buffer receiving the protected key
* @return 0 on success, negative errno value on failure
*/
int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
struct pkey_protkey *protkey);
#endif /* _KAPI_PKEY_H */
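To make the new in-kernel calls concrete, a hedged sketch of a caller; the wrapper below is hypothetical, only pkey_genprotkey() and its semantics come from this patch:

/* Hypothetical in-kernel caller; illustrative only. */
#include <asm/pkey.h>

static int make_ephemeral_protkey(struct pkey_protkey *pk)
{
	/*
	 * Request a random AES-128 protected key. It never exists as
	 * clear key material and becomes unusable after re-IPL, which
	 * is the property the encrypted-swap use case relies on.
	 */
	return pkey_genprotkey(PKEY_KEYTYPE_AES_128, pk);
}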


@ -242,7 +242,7 @@ static inline unsigned long current_stack_pointer(void)
return sp;
}
static inline unsigned short stap(void)
static __no_sanitize_address_or_inline unsigned short stap(void)
{
unsigned short cpu_address;
@ -250,6 +250,55 @@ static inline unsigned short stap(void)
return cpu_address;
}
#define CALL_ARGS_0() \
register unsigned long r2 asm("2")
#define CALL_ARGS_1(arg1) \
register unsigned long r2 asm("2") = (unsigned long)(arg1)
#define CALL_ARGS_2(arg1, arg2) \
CALL_ARGS_1(arg1); \
register unsigned long r3 asm("3") = (unsigned long)(arg2)
#define CALL_ARGS_3(arg1, arg2, arg3) \
CALL_ARGS_2(arg1, arg2); \
register unsigned long r4 asm("4") = (unsigned long)(arg3)
#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
CALL_ARGS_3(arg1, arg2, arg3); \
register unsigned long r5 asm("5") = (unsigned long)(arg4)
#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
CALL_ARGS_4(arg1, arg2, arg3, arg4); \
register unsigned long r6 asm("6") = (unsigned long)(arg5)
#define CALL_FMT_0
#define CALL_FMT_1 CALL_FMT_0, "0" (r2)
#define CALL_FMT_2 CALL_FMT_1, "d" (r3)
#define CALL_FMT_3 CALL_FMT_2, "d" (r4)
#define CALL_FMT_4 CALL_FMT_3, "d" (r5)
#define CALL_FMT_5 CALL_FMT_4, "d" (r6)
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
#define CALL_CLOBBER_0 CALL_CLOBBER_1
#define CALL_ON_STACK(fn, stack, nr, args...) \
({ \
CALL_ARGS_##nr(args); \
unsigned long prev; \
\
asm volatile( \
" la %[_prev],0(15)\n" \
" la 15,0(%[_stack])\n" \
" stg %[_prev],%[_bc](15)\n" \
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: "+&d" (r2), [_prev] "=&a" (prev) \
: [_stack] "a" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_fn] "X" (fn) CALL_FMT_##nr : CALL_CLOBBER_##nr); \
r2; \
})
/*
* Give up the time slice of the virtual PU.
*/
@ -287,7 +336,7 @@ static inline void __load_psw(psw_t psw)
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
static inline void __load_psw_mask(unsigned long mask)
static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;
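To illustrate the CALL_ON_STACK() machinery above, a hedged sketch of a call site; the helpers are hypothetical, and reading the stack address from the new lowcore nodat_stack field is an assumption for the example:

/* Hypothetical call site for CALL_ON_STACK(); illustrative only. */
static unsigned long add_two(unsigned long a, unsigned long b)
{
	return a + b;
}

static unsigned long demo_call_on_stack(void)
{
	/*
	 * Switches %r15 to the nodat stack, chains the old stack via the
	 * frame back_chain, calls add_two(1, 2) and returns its r2 value.
	 */
	return CALL_ON_STACK(add_two, S390_lowcore.nodat_stack, 2, 1, 2);
}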


@ -252,13 +252,11 @@ struct slsb {
* (for communication with upper layer programs)
* (only required for use with completion queues)
* @flags: flags indicating state of buffer
* @aob: pointer to QAOB used for the particular SBAL
* @user: pointer to upper layer program's state information related to SBAL
* (stored in user1 data of QAOB)
*/
struct qdio_outbuf_state {
u8 flags;
struct qaob *aob;
void *user;
};


@ -95,6 +95,7 @@ extern struct sclp_info sclp;
struct zpci_report_error_header {
u8 version; /* Interface version byte */
u8 action; /* Action qualifier byte
* 0: Adapter Reset Request
* 1: Deconfigure and repair action requested
* (OpenCrypto Problem Call Home)
* 2: Informational Report
@ -104,6 +105,8 @@ struct zpci_report_error_header {
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
int sclp_early_read_info(void);
int sclp_early_read_storage_info(void);
int sclp_early_get_core_info(struct sclp_core_info *info);
void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void);
@ -111,6 +114,8 @@ void sclp_early_printk(const char *s);
void sclp_early_printk_force(const char *s);
void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
int sclp_early_get_memsize(unsigned long *mem);
int sclp_early_get_hsa_size(unsigned long *hsa_size);
int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);


@ -4,4 +4,16 @@
#include <asm-generic/sections.h>
/*
 * The .boot.data section contains variables "shared" between the decompressor
 * and the decompressed kernel. The decompressor will store values in them and
 * copy them over to the decompressed image before starting it.
 *
 * Each variable ends up in its own intermediate section .boot.data.<var name>;
 * those sections are later sorted by alignment + name and merged together into
 * the final .boot.data section, which should be identical in the decompressor
 * and the decompressed kernel (this is checked during the build).
*/
#define __bootdata(var) __section(.boot.data.var) var
#endif
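As a usage sketch of the mechanism described above (ipl_parm.c and mem_detect.c earlier in this diff use it for real variables such as memory_end); the variable and helper below are hypothetical:

/* Hypothetical shared boot variable; the __bootdata mechanism is real. */
#include <asm/boot_data.h>

/* Ends up in its own .boot.data.early_example section in both images. */
unsigned long __bootdata(early_example);

/*
 * Decompressor side: assign a value before the decompressed kernel is
 * entered; copy_bootdata() in arch/s390/boot/startup.c then copies the
 * whole .boot.data section over to the decompressed image.
 */
static void detect_example(void)
{
	early_example = 0x2a;
}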


@ -65,12 +65,11 @@
#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
extern int noexec_disabled;
extern int memory_end_set;
extern unsigned long memory_end;
extern unsigned long max_physmem_end;
extern void detect_memory_memblock(void);
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)


@ -53,6 +53,27 @@ char *strstr(const char *s1, const char *s2);
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
extern void *__memcpy(void *dest, const void *src, size_t n);
extern void *__memset(void *s, int c, size_t n);
extern void *__memmove(void *dest, const void *src, size_t n);
/*
* For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
*/
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif
#endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
void *__memset16(uint16_t *s, uint16_t v, size_t count);
void *__memset32(uint32_t *s, uint32_t v, size_t count);
void *__memset64(uint64_t *s, uint64_t v, size_t count);
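A short hedged illustration of the KASAN block above: in a file built without address-sanitizer instrumentation (e.g. via KASAN_SANITIZE_foo.o := n), __SANITIZE_ADDRESS__ is not defined and a plain memcpy() resolves to the uninstrumented variant; the helper is hypothetical:

/* Hypothetical code in a non-instrumented s390 file under CONFIG_KASAN. */
#include <linux/string.h>

static void copy_blob(void *dst, const void *src, size_t len)
{
	/* Preprocesses to __memcpy(dst, src, len) per the defines above. */
	memcpy(dst, src, len);
}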


@ -11,19 +11,24 @@
#include <linux/const.h>
/*
* Size of kernel stack for each process
* General size of kernel stacks
*/
#ifdef CONFIG_KASAN
#define THREAD_SIZE_ORDER 3
#else
#define THREAD_SIZE_ORDER 2
#define ASYNC_ORDER 2
#endif
#define BOOT_STACK_ORDER 2
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
#include <asm/page.h>
#include <asm/processor.h>
#define STACK_INIT_OFFSET \
(THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line


@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/page.h>
/*
 * The .boot.data section is shared between the decompressor code and the
 * decompressed kernel. The decompressor will store values in it, and copy
 * them over to the decompressed image before starting it.
 *
 * .boot.data variables are kept in separate .boot.data.<var name> sections,
 * which are sorted by alignment first, then by name, before being merged
 * into a single .boot.data section. This way big holes caused by page-aligned
 * structs are avoided and the linker produces a consistent result.
*/
#define BOOT_DATA \
. = ALIGN(PAGE_SIZE); \
.boot.data : { \
__boot_data_start = .; \
*(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.data*))) \
__boot_data_end = .; \
}


@ -21,9 +21,13 @@
#define PKEY_IOCTL_MAGIC 'p'
#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
#define PROTKEYBLOBSIZE 80 /* protected key blob size is always 80 bytes */
#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
#define MINKEYBLOBSIZE SECKEYBLOBSIZE /* Minimum size of a key blob */
#define MAXKEYBLOBSIZE PROTKEYBLOBSIZE /* Maximum size of a key blob */
/* defines for the type field within the pkey_protkey struct */
#define PKEY_KEYTYPE_AES_128 1
#define PKEY_KEYTYPE_AES_192 2
@ -129,4 +133,34 @@ struct pkey_verifykey {
#define PKEY_VERIFY_ATTR_AES 0x00000001 /* key is an AES key */
#define PKEY_VERIFY_ATTR_OLD_MKVP 0x00000100 /* key has old MKVP value */
/*
* Generate (AES) random protected key.
*/
struct pkey_genprotk {
__u32 keytype; /* in: key type to generate */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_GENPROTK _IOWR(PKEY_IOCTL_MAGIC, 0x08, struct pkey_genprotk)
/*
* Verify an (AES) protected key.
*/
struct pkey_verifyprotk {
struct pkey_protkey protkey; /* in: the protected key to verify */
};
#define PKEY_VERIFYPROTK _IOW(PKEY_IOCTL_MAGIC, 0x09, struct pkey_verifyprotk)
/*
* Transform a key blob (of any type) into a protected key
*/
struct pkey_kblob2pkey {
__u8 __user *key; /* in: the key blob */
__u32 keylen; /* in: the key blob length */
struct pkey_protkey protkey; /* out: the protected key */
};
#define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey)
#endif /* _UAPI_PKEY_H */
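
A minimal userspace sketch of the new ioctl, assuming the driver exposes its usual misc device node as /dev/pkey and that struct pkey_protkey carries a type field (neither the node name nor that struct's layout appears in this excerpt):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

int main(void)
{
	struct pkey_genprotk genprotk = { .keytype = PKEY_KEYTYPE_AES_128 };
	int fd = open("/dev/pkey", O_RDWR);

	if (fd < 0)
		return 1;
	/* ask the kernel for a fresh random protected key */
	if (ioctl(fd, PKEY_GENPROTK, &genprotk) == 0)
		printf("generated protected key, type %u\n",
		       genprotk.protkey.type);
	close(fd);
	return 0;
}

PKEY_GENPROTK is _IOWR, so the same struct carries the requested keytype in and the generated protected key back out.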

View File

@ -2,9 +2,9 @@
/*
* include/asm-s390/zcrypt.h
*
* zcrypt 2.1.0 (user-visible header)
* zcrypt 2.2.1 (user-visible header)
*
* Copyright IBM Corp. 2001, 2006
* Copyright IBM Corp. 2001, 2018
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@ -15,12 +15,15 @@
#define __ASM_S390_ZCRYPT_H
#define ZCRYPT_VERSION 2
#define ZCRYPT_RELEASE 1
#define ZCRYPT_RELEASE 2
#define ZCRYPT_VARIANT 1
#include <linux/ioctl.h>
#include <linux/compiler.h>
/* Name of the zcrypt device driver. */
#define ZCRYPT_NAME "zcrypt"
/**
* struct ica_rsa_modexpo
*
@ -309,6 +312,16 @@ struct zcrypt_device_matrix_ext {
#define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT])
#define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT])
/*
* Support for multiple zcrypt device nodes.
*/
/* Nr of minor device node numbers to allocate. */
#define ZCRYPT_MAX_MINOR_NODES 256
/* Max number of possible ioctls */
#define MAX_ZDEV_IOCTLS (1 << _IOC_NRBITS)
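
Since the Linux ioctl encoding reserves 8 bits for the command number (_IOC_NRBITS == 8), MAX_ZDEV_IOCTLS evaluates to 1 << 8 = 256 — presumably one bit per possible ioctl number in the per-node access checks that the multiple-device-node support needs.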
/*
* Only deprecated defines, structs and ioctls below this line.
*/

View File

@ -23,6 +23,10 @@ KCOV_INSTRUMENT_early_nobss.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_early_nobss.o := n
KASAN_SANITIZE_early_nobss.o := n
KASAN_SANITIZE_ipl.o := n
KASAN_SANITIZE_machine_kexec.o := n
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
@ -47,7 +51,7 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
obj-y += nospec-branch.o
obj-y += nospec-branch.o ipl_vmparm.o
extra-y += head64.o vmlinux.lds

View File

@ -159,7 +159,7 @@ int main(void)
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
OFFSET(__LC_NODAT_STACK, lowcore, nodat_stack);
OFFSET(__LC_RESTART_STACK, lowcore, restart_stack);
OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);

View File

@ -18,7 +18,7 @@
ENTRY(s390_base_mcck_handler)
basr %r13,0
0: lg %r15,__LC_PANIC_STACK # load panic stack
0: lg %r15,__LC_NODAT_STACK # load panic stack
aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_mcck_handler_fn
lg %r9,0(%r1)

View File

@ -30,7 +30,7 @@
* The stack trace can start at any of the three stacks and can potentially
* touch all of them. The order is: panic stack, async stack, sync stack.
*/
static unsigned long
static unsigned long __no_sanitize_address
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
unsigned long low, unsigned long high)
{
@ -77,11 +77,11 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
sp = __dump_trace(func, data, sp,
S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
S390_lowcore.panic_stack + frame_size);
S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
S390_lowcore.nodat_stack + frame_size);
#endif
sp = __dump_trace(func, data, sp,
S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
S390_lowcore.async_stack + frame_size - THREAD_SIZE,
S390_lowcore.async_stack + frame_size);
task = task ?: current;
__dump_trace(func, data, sp,
@ -124,7 +124,7 @@ void show_registers(struct pt_regs *regs)
char *mode;
mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
if (!user_mode(regs))
pr_cont(" (%pSR)", (void *)regs->psw.addr);
pr_cont("\n");

View File

@ -29,10 +29,9 @@
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
#include "entry.h"
static void __init setup_boot_command_line(void);
/*
* Initialize storage key for kernel pages
*/
@ -284,51 +283,11 @@ static int __init cad_setup(char *str)
}
early_param("cad", cad_setup);
/* Set up boot command line */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
char *parm, *delim;
size_t rc, len;
len = strlen(boot_command_line);
delim = boot_command_line + len; /* '\0' character position */
parm = boot_command_line + len + 1; /* append right after '\0' */
rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
if (rc) {
if (*parm == '=')
memmove(boot_command_line, parm + 1, rc);
else
*delim = ' '; /* replace '\0' with space */
}
}
static inline int has_ebcdic_char(const char *str)
{
int i;
for (i = 0; str[i]; i++)
if (str[i] & 0x80)
return 1;
return 0;
}
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
/* convert arch command line to ascii if necessary */
if (has_ebcdic_char(COMMAND_LINE))
EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
/* copy arch command line */
strlcpy(boot_command_line, strstrip(COMMAND_LINE),
ARCH_COMMAND_LINE_SIZE);
/* append IPL PARM data to the boot command line */
if (MACHINE_IS_VM)
append_to_cmdline(append_ipl_vmparm);
append_to_cmdline(append_ipl_scpdata);
strlcpy(boot_command_line, early_command_line, ARCH_COMMAND_LINE_SIZE);
}
static void __init check_image_bootable(void)

View File

@ -13,8 +13,8 @@
#include <linux/string.h>
#include <asm/sections.h>
#include <asm/lowcore.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <asm/kasan.h>
#include "entry.h"
static void __init reset_tod_clock(void)
@ -32,26 +32,6 @@ static void __init reset_tod_clock(void)
S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
static void __init rescue_initrd(void)
{
unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
/*
* Just like in case of IPL from VM reader we make sure there is a
* gap of 4MB between end of kernel and start of initrd.
* That way we can also be sure that saving an NSS will succeed,
* which however only requires different segments.
*/
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
if (!INITRD_START || !INITRD_SIZE)
return;
if (INITRD_START >= min_initrd_addr)
return;
memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
}
static void __init clear_bss_section(void)
{
memset(__bss_start, 0, __bss_stop - __bss_start);
@ -60,6 +40,6 @@ static void __init clear_bss_section(void)
void __init startup_init_nobss(void)
{
reset_tod_clock();
rescue_initrd();
clear_bss_section();
kasan_early_init();
}

View File

@ -85,14 +85,34 @@ _LPP_OFFSET = __LC_LPP
#endif
.endm
.macro CHECK_STACK stacksize,savearea
.macro CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
tml %r15,\stacksize - CONFIG_STACK_GUARD
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
lghi %r14,\savearea
jz stack_overflow
#endif
.endm
.macro CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
lgr %r14,%r15
nill %r14,0x10000 - STACK_SIZE
oill %r14,STACK_INIT
clg %r14,__LC_KERNEL_STACK
je \oklabel
clg %r14,__LC_ASYNC_STACK
je \oklabel
clg %r14,__LC_NODAT_STACK
je \oklabel
clg %r14,__LC_RESTART_STACK
je \oklabel
lghi %r14,\savearea
j stack_overflow
#else
j \oklabel
#endif
.endm
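
The same check, sketched in C (an illustration, not kernel code): mask the stack pointer down to its THREAD_SIZE-aligned base, re-apply the expected STACK_INIT offset, and accept %r15 only if the result matches one of the four known per-CPU stacks:

/* hypothetical helper; the real logic is the assembler macro above */
static int sp_on_known_stack(unsigned long r15)
{
	unsigned long top = (r15 & ~(STACK_SIZE - 1)) | STACK_INIT;

	return top == S390_lowcore.kernel_stack ||
	       top == S390_lowcore.async_stack ||
	       top == S390_lowcore.nodat_stack ||
	       top == S390_lowcore.restart_stack;
}

Any stack pointer that does not round to a known stack is treated as an overflow and the code branches to stack_overflow.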
.macro SWITCH_ASYNC savearea,timer
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
@ -104,11 +124,11 @@ _LPP_OFFSET = __LC_LPP
brasl %r14,cleanup_critical
tmhh %r8,0x0001 # retest problem state after cleanup
jnz 1f
0: lg %r14,__LC_ASYNC_STACK # are we already on the async stack?
0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
jnz 2f
CHECK_STACK 1<<STACK_SHIFT,\savearea
CHECK_STACK \savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 3f
1: UPDATE_VTIME %r14,%r15,\timer
@ -600,9 +620,10 @@ ENTRY(pgm_check_handler)
jnz 1f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
1: CHECK_STACK __LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 4f
# CHECK_VMAP_STACK branches to stack_overflow or 4f
CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
@ -1136,7 +1157,8 @@ ENTRY(mcck_int_handler)
jnz 4f
TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
4: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
@ -1163,7 +1185,6 @@ ENTRY(mcck_int_handler)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
jno .Lmcck_return
TRACE_IRQS_OFF
@ -1182,7 +1203,7 @@ ENTRY(mcck_int_handler)
lpswe __LC_RETURN_MCCK_PSW
.Lmcck_panic:
lg %r15,__LC_PANIC_STACK
lg %r15,__LC_NODAT_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
@ -1193,12 +1214,10 @@ ENTRY(restart_int_handler)
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
stg %r15,__LC_SAVE_AREA_RESTART
lg %r15,__LC_RESTART_STACK
aghi %r15,-__PT_SIZE # create pt_regs on stack
xc 0(__PT_SIZE,%r15),0(%r15)
stmg %r0,%r14,__PT_R0(%r15)
mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
lg %r2,__LC_RESTART_DATA
@ -1216,14 +1235,14 @@ ENTRY(restart_int_handler)
.section .kprobes.text, "ax"
#ifdef CONFIG_CHECK_STACK
#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
* The synchronous or the asynchronous stack overflowed. We are dead.
* No need to properly save the registers, we are going to panic anyway.
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
lg %r15,__LC_PANIC_STACK # change to panic stack
lg %r15,__LC_NODAT_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)

View File

@ -86,4 +86,7 @@ DECLARE_PER_CPU(u64, mt_cycles[8]);
void gs_load_bc_cb(struct pt_regs *regs);
void set_fs_fixup(void);
unsigned long stack_alloc(void);
void stack_free(unsigned long stack);
#endif /* _ENTRY_H */

View File

@ -14,6 +14,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
__HEAD
ENTRY(startup_continue)
@ -35,10 +36,7 @@ ENTRY(startup_continue)
#
larl %r14,init_task
stg %r14,__LC_CURRENT
larl %r15,init_thread_union
aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD
#
# Early setup functions that may not rely on an initialized bss section,
# like moving the initrd. Returns with an initialized bss section.

View File

@ -29,6 +29,8 @@
#include <asm/checksum.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include "entry.h"
#define IPL_PARM_BLOCK_VERSION 0
@ -117,6 +119,9 @@ static char *dump_type_str(enum dump_type type)
}
}
struct ipl_parameter_block __bootdata(early_ipl_block);
int __bootdata(early_ipl_block_valid);
static int ipl_block_valid;
static struct ipl_parameter_block ipl_block;
@ -151,6 +156,8 @@ static inline int __diag308(unsigned long subcode, void *addr)
int diag308(unsigned long subcode, void *addr)
{
if (IS_ENABLED(CONFIG_KASAN))
__arch_local_irq_stosm(0x04); /* enable DAT */
diag_stat_inc(DIAG_STAT_X308);
return __diag308(subcode, addr);
}
@ -262,115 +269,16 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
/* VM IPL PARM routines */
static size_t reipl_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
int i;
size_t len;
char has_lowercase = 0;
len = 0;
if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
(ipb->ipl_info.ccw.vm_parm_len > 0)) {
len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
*/
for (i = 0; i < len; i++)
if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
(dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
(dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
has_lowercase = 1;
break;
}
if (!has_lowercase)
EBC_TOLOWER(dest, len);
EBCASC(dest, len);
}
dest[len] = 0;
return len;
}
size_t append_ipl_vmparm(char *dest, size_t size)
{
size_t rc;
rc = 0;
if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)
rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
else
dest[0] = 0;
return rc;
}
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
append_ipl_vmparm(parm, sizeof(parm));
if (ipl_block_valid && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block);
return sprintf(page, "%s\n", parm);
}
static size_t scpdata_length(const char* buf, size_t count)
{
while (count) {
if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
break;
count--;
}
return count;
}
static size_t reipl_append_ascii_scpdata(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
size_t count;
size_t i;
int has_lowercase;
count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
ipb->ipl_info.fcp.scp_data_len));
if (!count)
goto out;
has_lowercase = 0;
for (i = 0; i < count; i++) {
if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
count = 0;
goto out;
}
if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
has_lowercase = 1;
}
if (has_lowercase)
memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
else
for (i = 0; i < count; i++)
dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
out:
dest[count] = '\0';
return count;
}
size_t append_ipl_scpdata(char *dest, size_t len)
{
size_t rc;
rc = 0;
if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
else
dest[0] = 0;
return rc;
}
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
@ -564,7 +472,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
{
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
ipl_block_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
return sprintf(page, "%s\n", vmparm);
}
@ -1769,11 +1677,10 @@ void __init setup_ipl(void)
void __init ipl_store_parameters(void)
{
int rc;
rc = diag308(DIAG308_STORE, &ipl_block);
if (rc == DIAG308_RC_OK && ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
if (early_ipl_block_valid) {
memcpy(&ipl_block, &early_ipl_block, sizeof(ipl_block));
ipl_block_valid = 1;
}
}
void s390_reset_system(void)

View File

@ -0,0 +1,36 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/ebcdic.h>
#include <asm/ipl.h>
/* VM IPL PARM routines */
size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
int i;
size_t len;
char has_lowercase = 0;
len = 0;
if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
(ipb->ipl_info.ccw.vm_parm_len > 0)) {
len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
*/
for (i = 0; i < len; i++)
if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
(dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
(dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
has_lowercase = 1;
break;
}
if (!has_lowercase)
EBC_TOLOWER(dest, len);
EBCASC(dest, len);
}
dest[len] = 0;
return len;
}

View File

@ -172,15 +172,7 @@ void do_softirq_own_stack(void)
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
/* Need to switch to the async. stack. */
new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old;
asm volatile(" la 15,0(%0)\n"
" brasl 14,__do_softirq\n"
" la 15,0(%1)\n"
: : "a" (new), "a" (old)
: "0", "1", "2", "3", "4", "5", "14",
"cc", "memory" );
CALL_ON_STACK(__do_softirq, new, 0);
} else {
/* We are already on the async stack. */
__do_softirq();
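
The open-coded stack switch is replaced by the CALL_ON_STACK() helper. Judging from its call sites in this series, the contract is CALL_ON_STACK(fn, stack, nr_args, args...): build a frame on the new stack, chain it back to the old one, call fn, and restore %r15 afterwards. A pseudo-C sketch of what the removed asm (and, presumably, the helper) does — read_sp()/write_sp() are hypothetical stand-ins for the inline-assembly register handling:

/* pseudo-C equivalent of CALL_ON_STACK(__do_softirq, new, 0) */
unsigned long old = read_sp();
unsigned long frame = new - STACK_FRAME_OVERHEAD;
((struct stack_frame *) frame)->back_chain = old; /* keep backchain intact */
write_sp(frame);
__do_softirq();
write_sp(old);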

View File

@ -142,18 +142,27 @@ static noinline void __machine_kdump(void *image)
}
#endif
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
*/
static bool kdump_csum_valid(struct kimage *image)
static unsigned long do_start_kdump(unsigned long addr)
{
#ifdef CONFIG_CRASH_DUMP
struct kimage *image = (struct kimage *) addr;
int (*start_kdump)(int) = (void *)image->start;
int rc;
__arch_local_irq_stnsm(0xfb); /* disable DAT */
rc = start_kdump(0);
__arch_local_irq_stosm(0x04); /* enable DAT */
return rc;
}
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
*/
static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
int rc;
rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
return rc == 0;
#else
return false;

View File

@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <asm/alternative.h>
@ -32,12 +33,18 @@
void *module_alloc(unsigned long size)
{
void *p;
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL_EXEC,
0, NUMA_NO_NODE,
__builtin_return_address(0));
p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p);
return NULL;
}
return p;
}
void module_arch_freeing_init(struct module *mod)

View File

@ -2045,14 +2045,17 @@ static int __init init_cpum_sampling_pmu(void)
}
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg)
if (!sfdbg) {
pr_err("Registering for s390dbf failed\n");
return -ENOMEM;
}
debug_register_view(sfdbg, &debug_sprintf_view);
err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
debug_unregister(sfdbg);
goto out;
}
@ -2061,6 +2064,7 @@ static int __init init_cpum_sampling_pmu(void)
pr_cpumsf_err(RS_INIT_FAILURE_PERF);
unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
debug_unregister(sfdbg);
goto out;
}

View File

@ -49,6 +49,7 @@
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <asm/ipl.h>
#include <asm/facility.h>
@ -69,6 +70,7 @@
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include "entry.h"
/*
@ -88,9 +90,11 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
int __initdata memory_end_set;
unsigned long __initdata memory_end;
unsigned long __initdata max_physmem_end;
int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@ -283,15 +287,6 @@ void machine_power_off(void)
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);
static int __init early_parse_mem(char *p)
{
memory_end = memparse(p, &p);
memory_end &= PAGE_MASK;
memory_end_set = 1;
return 0;
}
early_param("mem", early_parse_mem);
static int __init parse_vmalloc(char *arg)
{
if (!arg)
@ -303,6 +298,78 @@ early_param("vmalloc", parse_vmalloc);
void *restart_stack __section(.data);
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
return (unsigned long)
__vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
VMALLOC_START, VMALLOC_END,
THREADINFO_GFP,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
#else
return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}
void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
vfree((void *) stack);
#else
free_pages(stack, THREAD_SIZE_ORDER);
#endif
}
int __init arch_early_irq_init(void)
{
unsigned long stack;
stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (!stack)
panic("Couldn't allocate async stack");
S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
return 0;
}
static int __init async_stack_realloc(void)
{
unsigned long old, new;
old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
new = stack_alloc();
if (!new)
panic("Couldn't allocate async stack");
S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
free_pages(old, THREAD_SIZE_ORDER);
return 0;
}
early_initcall(async_stack_realloc);
void __init arch_call_rest_init(void)
{
struct stack_frame *frame;
unsigned long stack;
stack = stack_alloc();
if (!stack)
panic("Couldn't allocate kernel stack");
current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
current->stack_vm_area = (void *) stack;
#endif
set_task_stack_end_magic(current);
stack += STACK_INIT_OFFSET;
S390_lowcore.kernel_stack = stack;
frame = (struct stack_frame *) stack;
memset(frame, 0, sizeof(*frame));
/* Branch to rest_init on the new stack, never returns */
asm volatile(
" la 15,0(%[_frame])\n"
" jg rest_init\n"
: : [_frame] "a" (frame));
}
static void __init setup_lowcore(void)
{
struct lowcore *lc;
@ -329,14 +396,8 @@ static void __init setup_lowcore(void)
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max;
lc->kernel_stack = ((unsigned long) &init_thread_union)
lc->nodat_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
@ -357,8 +418,12 @@ static void __init setup_lowcore(void)
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
restart_stack += ASYNC_SIZE;
/*
* Allocate the global restart stack which is the same for
* all CPUs in case *one* of them does a PSW restart.
*/
restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE);
restart_stack += STACK_INIT_OFFSET;
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
@ -467,19 +532,26 @@ static void __init setup_memory_end(void)
{
unsigned long vmax, vmalloc_size, tmp;
/* Choose kernel address space layout: 2, 3, or 4 levels. */
/* Choose kernel address space layout: 3 or 4 levels. */
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
vmax = _REGION2_SIZE; /* 3-level kernel page table */
else
vmax = _REGION1_SIZE; /* 4-level kernel page table */
if (IS_ENABLED(CONFIG_KASAN)) {
vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
? _REGION1_SIZE
: _REGION2_SIZE;
} else {
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
vmax = _REGION2_SIZE; /* 3-level kernel page table */
else
vmax = _REGION1_SIZE; /* 4-level kernel page table */
}
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
VMALLOC_START = vmax - vmalloc_size;
VMALLOC_START = VMALLOC_END - vmalloc_size;
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
@ -491,7 +563,12 @@ static void __init setup_memory_end(void)
vmemmap = (struct page *) tmp;
/* Take care that memory_end is set and <= vmemmap */
memory_end = min(memory_end ?: max_physmem_end, tmp);
memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
/* fit in kasan shadow memory region between 1:1 and vmemmap */
memory_end = min(memory_end, KASAN_SHADOW_START);
vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
max_pfn = max_low_pfn = PFN_DOWN(memory_end);
memblock_remove(memory_end, ULONG_MAX);
@ -532,17 +609,8 @@ static struct notifier_block kdump_mem_nb = {
*/
static void reserve_memory_end(void)
{
#ifdef CONFIG_CRASH_DUMP
if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
!OLDMEM_BASE && sclp.hsa_size) {
memory_end = sclp.hsa_size;
memory_end &= PAGE_MASK;
memory_end_set = 1;
}
#endif
if (!memory_end_set)
return;
memblock_reserve(memory_end, ULONG_MAX);
if (memory_end_set)
memblock_reserve(memory_end, ULONG_MAX);
}
/*
@ -649,6 +717,62 @@ static void __init reserve_initrd(void)
#endif
}
static void __init reserve_mem_detect_info(void)
{
unsigned long start, size;
get_mem_detect_reserved(&start, &size);
if (size)
memblock_reserve(start, size);
}
static void __init free_mem_detect_info(void)
{
unsigned long start, size;
get_mem_detect_reserved(&start, &size);
if (size)
memblock_free(start, size);
}
static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
start, start + size - 1);
memblock_add_range(&memblock.memory, start, size, 0, 0);
memblock_add_range(&memblock.physmem, start, size, 0, 0);
}
static const char * __init get_mem_info_source(void)
{
switch (mem_detect.info_source) {
case MEM_DETECT_SCLP_STOR_INFO:
return "sclp storage info";
case MEM_DETECT_DIAG260:
return "diag260";
case MEM_DETECT_SCLP_READ_INFO:
return "sclp read info";
case MEM_DETECT_BIN_SEARCH:
return "binary search";
}
return "none";
}
static void __init memblock_add_mem_detect_info(void)
{
unsigned long start, end;
int i;
memblock_dbg("physmem info source: %s (%hhd)\n",
get_mem_info_source(), mem_detect.info_source);
/* keep memblock lists close to the kernel */
memblock_set_bottom_up(true);
for_each_mem_detect_block(i, &start, &end)
memblock_physmem_add(start, end - start);
memblock_set_bottom_up(false);
memblock_dump_all();
}
/*
* Check for initrd being in usable memory
*/
@ -913,11 +1037,13 @@ void __init setup_arch(char **cmdline_p)
reserve_oldmem();
reserve_kernel();
reserve_initrd();
reserve_mem_detect_info();
memblock_allow_resize();
/* Get information about *all* installed memory */
detect_memory_memblock();
memblock_add_mem_detect_info();
free_mem_detect_info();
remove_oldmem();
/*

View File

@ -186,36 +186,34 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
pcpu_sigp_retry(pcpu, order, 0);
}
#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
unsigned long async_stack, panic_stack;
unsigned long async_stack, nodat_stack;
struct lowcore *lc;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
panic_stack = __get_free_page(GFP_KERNEL);
if (!pcpu->lowcore || !panic_stack || !async_stack)
nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (!pcpu->lowcore || !nodat_stack)
goto out;
} else {
async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
}
async_stack = stack_alloc();
if (!async_stack)
goto out;
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->async_stack = async_stack + STACK_INIT_OFFSET;
lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
if (nmi_alloc_per_cpu(lc))
goto out;
goto out_async;
if (vdso_alloc_per_cpu(lc))
goto out_mcesa;
lowcore_ptr[cpu] = lc;
@ -224,10 +222,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
out_mcesa:
nmi_free_per_cpu(lc);
out_async:
stack_free(async_stack);
out:
if (pcpu != &pcpu_devices[0]) {
free_page(panic_stack);
free_pages(async_stack, ASYNC_ORDER);
free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
return -ENOMEM;
@ -237,15 +236,21 @@ out:
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
unsigned long async_stack, nodat_stack, lowcore;
nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
lowcore = (unsigned long) pcpu->lowcore;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
vdso_free_per_cpu(pcpu->lowcore);
nmi_free_per_cpu(pcpu->lowcore);
stack_free(async_stack);
if (pcpu == &pcpu_devices[0])
return;
free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages(lowcore, LC_ORDER);
}
#endif /* CONFIG_HOTPLUG_CPU */
@ -293,7 +298,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
struct lowcore *lc = pcpu->lowcore;
lc->restart_stack = lc->kernel_stack;
lc->restart_stack = lc->nodat_stack;
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
lc->restart_source = -1UL;
@ -303,15 +308,21 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
/*
* Call function via PSW restart on pcpu and stop the current cpu.
*/
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
void *data, unsigned long stack)
static void __pcpu_delegate(void (*func)(void*), void *data)
{
func(data); /* should not return */
}
static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
void (*func)(void *),
void *data, unsigned long stack)
{
struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
unsigned long source_cpu = stap();
__load_psw_mask(PSW_KERNEL_BITS);
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
if (pcpu->address == source_cpu)
func(data); /* should not return */
CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
/* Stop target cpu (if func returns this stops the current cpu). */
pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
/* Restart func on the target cpu and stop the current cpu. */
@ -372,8 +383,7 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
pcpu_delegate(&pcpu_devices[0], func, data,
pcpu_devices->lowcore->panic_stack -
PANIC_FRAME_OFFSET + PAGE_SIZE);
pcpu_devices->lowcore->nodat_stack);
}
int smp_find_processor_id(u16 address)
@ -791,37 +801,42 @@ void __init smp_detect_cpus(void)
memblock_free_early((unsigned long)info, sizeof(*info));
}
/*
* Activate a secondary processor.
*/
static void smp_start_secondary(void *cpuvoid)
static void smp_init_secondary(void)
{
int cpu = smp_processor_id();
S390_lowcore.last_update_clock = get_tod_clock();
S390_lowcore.restart_stack = (unsigned long) restart_stack;
S390_lowcore.restart_fn = (unsigned long) do_restart;
S390_lowcore.restart_data = 0;
S390_lowcore.restart_source = -1UL;
restore_access_regs(S390_lowcore.access_regs_save_area);
__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
cpu_init();
preempt_disable();
init_cpu_timer();
vtime_init();
pfault_init();
notify_cpu_starting(cpu);
notify_cpu_starting(smp_processor_id());
if (topology_cpu_dedicated(cpu))
set_cpu_flag(CIF_DEDICATED_CPU);
else
clear_cpu_flag(CIF_DEDICATED_CPU);
set_cpu_online(cpu, true);
set_cpu_online(smp_processor_id(), true);
inc_irq_stat(CPU_RST);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/*
* Activate a secondary processor.
*/
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
S390_lowcore.restart_stack = (unsigned long) restart_stack;
S390_lowcore.restart_fn = (unsigned long) do_restart;
S390_lowcore.restart_data = 0;
S390_lowcore.restart_source = -1UL;
__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
}
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{

View File

@ -183,17 +183,19 @@ static void fill_hdr(struct sthyi_sctns *sctns)
static void fill_stsi_mac(struct sthyi_sctns *sctns,
struct sysinfo_1_1_1 *sysinfo)
{
sclp_ocf_cpc_name_copy(sctns->mac.infmname);
if (*(u64 *)sctns->mac.infmname != 0)
sctns->mac.infmval1 |= MAC_NAME_VLD;
if (stsi(sysinfo, 1, 1, 1))
return;
sclp_ocf_cpc_name_copy(sctns->mac.infmname);
memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));
sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD;
sctns->mac.infmval1 |= MAC_ID_VLD;
}
static void fill_stsi_par(struct sthyi_sctns *sctns,

View File

@ -29,10 +29,11 @@
.section .text
ENTRY(swsusp_arch_suspend)
stmg %r6,%r15,__SF_GPRS(%r15)
lg %r1,__LC_NODAT_STACK
aghi %r1,-STACK_FRAME_OVERHEAD
stmg %r6,%r15,__SF_GPRS(%r1)
stg %r15,__SF_BACKCHAIN(%r1)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
/* Store FPU registers */
brasl %r14,save_fpu_regs
@ -197,9 +198,7 @@ pgm_check_entry:
brc 2,3b /* busy, try again */
/* Suspend CPU not available -> panic */
larl %r15,init_thread_union
aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
aghi %r15,-STACK_FRAME_OVERHEAD
larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD
larl %r2,.Lpanic_string
brasl %r14,sclp_early_printk_force
larl %r3,.Ldisabled_wait_31

View File

@ -56,7 +56,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
if (vma->vm_mm->context.compat_mm) {
vdso_pagelist = vdso32_pagelist;
vdso_pages = vdso32_pages;
}
@ -77,7 +77,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
if (is_compat_task())
if (vma->vm_mm->context.compat_mm)
vdso_pages = vdso32_pages;
#endif
@ -224,8 +224,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
if (is_compat_task())
if (is_compat_task()) {
vdso_pages = vdso32_pages;
mm->context.compat_mm = 1;
}
#endif
/*
* vDSO has a problem and was disabled, just don't "enable" it for

View File

@ -28,9 +28,10 @@ obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
# Disable gcov profiling and ubsan for VDSO code
# Disable gcov profiling, ubsan and kasan for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so

View File

@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
#include <asm/ptrace.h>
.text
.align 4
@ -18,8 +19,8 @@
__kernel_clock_gettime:
CFI_STARTPROC
ahi %r15,-16
CFI_DEF_CFA_OFFSET 176
CFI_VAL_OFFSET 15, -160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
chi %r2,__CLOCK_REALTIME_COARSE
@ -72,13 +73,13 @@ __kernel_clock_gettime:
st %r1,4(%r3) /* store tp->tv_nsec */
lhi %r2,0
ahi %r15,16
CFI_DEF_CFA_OFFSET 160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* CLOCK_MONOTONIC_COARSE */
CFI_DEF_CFA_OFFSET 176
CFI_VAL_OFFSET 15, -160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 9b
@ -158,17 +159,17 @@ __kernel_clock_gettime:
st %r1,4(%r3) /* store tp->tv_nsec */
lhi %r2,0
ahi %r15,16
CFI_DEF_CFA_OFFSET 160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* Fallback to system call */
CFI_DEF_CFA_OFFSET 176
CFI_VAL_OFFSET 15, -160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
19: lhi %r1,__NR_clock_gettime
svc 0
ahi %r15,16
CFI_DEF_CFA_OFFSET 160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC

View File

@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
#include <asm/ptrace.h>
.text
.align 4
@ -19,7 +20,7 @@ __kernel_gettimeofday:
CFI_STARTPROC
ahi %r15,-16
CFI_ADJUST_CFA_OFFSET 16
CFI_VAL_OFFSET 15, -160
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
basr %r5,0
0: al %r5,13f-0b(%r5) /* get &_vdso_data */
1: ltr %r3,%r3 /* check if tz is NULL */

View File

@ -28,9 +28,10 @@ obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
# Disable gcov profiling and ubsan for VDSO code
# Disable gcov profiling, ubsan and kasan for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so

View File

@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
#include <asm/ptrace.h>
.text
.align 4
@ -18,8 +19,8 @@
__kernel_clock_gettime:
CFI_STARTPROC
aghi %r15,-16
CFI_DEF_CFA_OFFSET 176
CFI_VAL_OFFSET 15, -160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME_COARSE
je 4f
@ -56,13 +57,13 @@ __kernel_clock_gettime:
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
aghi %r15,16
CFI_DEF_CFA_OFFSET 160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* CLOCK_MONOTONIC_COARSE */
CFI_DEF_CFA_OFFSET 176
CFI_VAL_OFFSET 15, -160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 3b
@ -115,13 +116,13 @@ __kernel_clock_gettime:
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
aghi %r15,16
CFI_DEF_CFA_OFFSET 160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* CPUCLOCK_VIRT for this thread */
CFI_DEF_CFA_OFFSET 176
CFI_VAL_OFFSET 15, -160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
9: lghi %r4,0
icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
@ -142,17 +143,17 @@ __kernel_clock_gettime:
stg %r4,8(%r3)
lghi %r2,0
aghi %r15,16
CFI_DEF_CFA_OFFSET 160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* Fallback to system call */
CFI_DEF_CFA_OFFSET 176
CFI_VAL_OFFSET 15, -160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
12: lghi %r1,__NR_clock_gettime
svc 0
aghi %r15,16
CFI_DEF_CFA_OFFSET 160
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC

View File

@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
#include <asm/ptrace.h>
.text
.align 4
@ -19,7 +20,7 @@ __kernel_gettimeofday:
CFI_STARTPROC
aghi %r15,-16
CFI_ADJUST_CFA_OFFSET 16
CFI_VAL_OFFSET 15, -160
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
larl %r5,_vdso_data
0: ltgr %r3,%r3 /* check if tz is NULL */
je 1f

View File

@ -16,6 +16,7 @@
#define RO_AFTER_INIT_DATA
#include <asm-generic/vmlinux.lds.h>
#include <asm/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
@ -134,6 +135,8 @@ SECTIONS
__nospec_return_end = . ;
}
BOOT_DATA
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
@ -146,6 +149,19 @@ SECTIONS
_end = . ;
/*
* uncompressed image info used by the decompressor
* it should match struct vmlinux_info
*/
.vmlinux.info 0 : {
QUAD(_stext) /* default_lma */
QUAD(startup_continue) /* entry */
QUAD(__bss_start - _stext) /* image_size */
QUAD(__bss_stop - __bss_start) /* bss_size */
QUAD(__boot_data_start) /* bootdata_off */
QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
}
/* Debugging sections. */
STABS_DEBUG
DWARF_DEBUG

View File

@ -9,5 +9,9 @@ lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
# Instrumenting memory accesses to __user data (in different address space)
# produces false positives
KASAN_SANITIZE_uaccess.o := n
chkbss := mem.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss

View File

@ -14,7 +14,8 @@
/*
* void *memmove(void *dest, const void *src, size_t n)
*/
ENTRY(memmove)
WEAK(memmove)
ENTRY(__memmove)
ltgr %r4,%r4
lgr %r1,%r2
jz .Lmemmove_exit
@ -47,6 +48,7 @@ ENTRY(memmove)
BR_EX %r14
.Lmemmove_mvc:
mvc 0(1,%r1),0(%r3)
ENDPROC(__memmove)
EXPORT_SYMBOL(memmove)
/*
@ -64,7 +66,8 @@ EXPORT_SYMBOL(memmove)
* return __builtin_memset(s, c, n);
* }
*/
ENTRY(memset)
WEAK(memset)
ENTRY(__memset)
ltgr %r4,%r4
jz .Lmemset_exit
ltgr %r3,%r3
@ -108,6 +111,7 @@ ENTRY(memset)
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
mvc 1(1,%r1),0(%r1)
ENDPROC(__memset)
EXPORT_SYMBOL(memset)
/*
@ -115,7 +119,8 @@ EXPORT_SYMBOL(memset)
*
* void *memcpy(void *dest, const void *src, size_t n)
*/
ENTRY(memcpy)
WEAK(memcpy)
ENTRY(__memcpy)
ltgr %r4,%r4
jz .Lmemcpy_exit
aghi %r4,-1
@ -136,6 +141,7 @@ ENTRY(memcpy)
j .Lmemcpy_remainder
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
ENDPROC(__memcpy)
EXPORT_SYMBOL(memcpy)
/*

View File

@ -4,10 +4,12 @@
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o
obj-y += page-states.o gup.o pageattr.o mem_detect.o
obj-y += pgtable.o pgalloc.o
obj-y += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
KASAN_SANITIZE_kasan_init.o := n
obj-$(CONFIG_KASAN) += kasan_init.o

View File

@ -3,6 +3,8 @@
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@ -17,18 +19,26 @@ enum address_markers_idx {
IDENTITY_NR = 0,
KERNEL_START_NR,
KERNEL_END_NR,
#ifdef CONFIG_KASAN
KASAN_SHADOW_START_NR,
KASAN_SHADOW_END_NR,
#endif
VMEMMAP_NR,
VMALLOC_NR,
MODULES_NR,
};
static struct addr_marker address_markers[] = {
[IDENTITY_NR] = {0, "Identity Mapping"},
[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
[VMEMMAP_NR] = {0, "vmemmap Area"},
[VMALLOC_NR] = {0, "vmalloc Area"},
[MODULES_NR] = {0, "Modules Area"},
[IDENTITY_NR] = {0, "Identity Mapping"},
[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KASAN
[KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
[KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
[VMEMMAP_NR] = {0, "vmemmap Area"},
[VMALLOC_NR] = {0, "vmalloc Area"},
[MODULES_NR] = {0, "Modules Area"},
{ -1, NULL }
};
@ -80,7 +90,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
} else if (prot != cur || level != st->level ||
st->current_address >= st->marker[1].start_address) {
/* Print the actual finished series */
seq_printf(m, "0x%0*lx-0x%0*lx",
seq_printf(m, "0x%0*lx-0x%0*lx ",
width, st->start_address,
width, st->current_address);
delta = (st->current_address - st->start_address) >> 10;
@ -90,7 +100,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}
seq_printf(m, "%9lu%c ", delta, *unit);
print_prot(m, st->current_prot, st->level);
if (st->current_address >= st->marker[1].start_address) {
while (st->current_address >= st->marker[1].start_address) {
st->marker++;
seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
@ -100,6 +110,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}
}
#ifdef CONFIG_KASAN
static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
{
unsigned int prot;
prot = pte_val(*kasan_zero_pte) &
(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
note_page(m, st, prot, 4);
}
#endif
/*
* The actual page table walker functions. In order to keep the
* implementation of print_prot() short, we only check and pass
@ -132,6 +153,13 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd_t *pmd;
int i;
#ifdef CONFIG_KASAN
if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
note_kasan_zero_page(m, st);
return;
}
#endif
for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
st->current_address = addr;
pmd = pmd_offset(pud, addr);
@ -156,6 +184,13 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud_t *pud;
int i;
#ifdef CONFIG_KASAN
if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
note_kasan_zero_page(m, st);
return;
}
#endif
for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
st->current_address = addr;
pud = pud_offset(p4d, addr);
@ -179,6 +214,13 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
p4d_t *p4d;
int i;
#ifdef CONFIG_KASAN
if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
note_kasan_zero_page(m, st);
return;
}
#endif
for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
st->current_address = addr;
p4d = p4d_offset(pgd, addr);

View File

@ -636,17 +636,19 @@ struct pfault_refbk {
u64 reserved;
} __attribute__ ((packed, aligned(8)));
static struct pfault_refbk pfault_init_refbk = {
.refdiagc = 0x258,
.reffcode = 0,
.refdwlen = 5,
.refversn = 2,
.refgaddr = __LC_LPP,
.refselmk = 1ULL << 48,
.refcmpmk = 1ULL << 48,
.reserved = __PF_RES_FIELD
};
int pfault_init(void)
{
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 0,
.refdwlen = 5,
.refversn = 2,
.refgaddr = __LC_LPP,
.refselmk = 1ULL << 48,
.refcmpmk = 1ULL << 48,
.reserved = __PF_RES_FIELD };
int rc;
if (pfault_disable)
@ -658,18 +660,20 @@ int pfault_init(void)
"1: la %0,8\n"
"2:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
: "=d" (rc)
: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
return rc;
}
static struct pfault_refbk pfault_fini_refbk = {
.refdiagc = 0x258,
.reffcode = 1,
.refdwlen = 5,
.refversn = 2,
};
void pfault_fini(void)
{
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 1,
.refdwlen = 5,
.refversn = 2,
};
if (pfault_disable)
return;
@ -678,7 +682,7 @@ void pfault_fini(void)
" diag %0,0,0x258\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
: : "a" (&refbk), "m" (refbk) : "cc");
: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);

View File

@ -42,6 +42,7 @@
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
@ -98,8 +99,9 @@ void __init paging_init(void)
S390_lowcore.user_asce = S390_lowcore.kernel_asce;
crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
vmem_map_init();
kasan_copy_shadow(init_mm.pgd);
/* enable virtual mapping in kernel mode */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
@ -107,6 +109,7 @@ void __init paging_init(void)
psw_bits(psw).dat = 1;
psw_bits(psw).as = PSW_BITS_AS_HOME;
__load_psw_mask(psw.mask);
kasan_free_early_identity();
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();

arch/s390/mm/kasan_init.c (new file, 387 lines)
View File

@ -0,0 +1,387 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
static void __init kasan_early_panic(const char *reason)
{
sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
sclp_early_printk(reason);
disabled_wait(0);
}
static void * __init kasan_early_alloc_segment(void)
{
segment_pos -= _SEGMENT_SIZE;
if (segment_pos < segment_low)
kasan_early_panic("out of memory during initialisation\n");
return (void *)segment_pos;
}
static void * __init kasan_early_alloc_pages(unsigned int order)
{
pgalloc_pos -= (PAGE_SIZE << order);
if (pgalloc_pos < pgalloc_low)
kasan_early_panic("out of memory during initialisation\n");
return (void *)pgalloc_pos;
}
static void * __init kasan_early_crst_alloc(unsigned long val)
{
unsigned long *table;
table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
if (table)
crst_table_init(table, val);
return table;
}
static pte_t * __init kasan_early_pte_alloc(void)
{
static void *pte_leftover;
pte_t *pte;
BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
if (!pte_leftover) {
pte_leftover = kasan_early_alloc_pages(0);
pte = pte_leftover + _PAGE_TABLE_SIZE;
} else {
pte = pte_leftover;
pte_leftover = NULL;
}
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
enum populate_mode {
POPULATE_ONE2ONE,
POPULATE_MAP,
POPULATE_ZERO_SHADOW
};
static void __init kasan_early_vmemmap_populate(unsigned long address,
unsigned long end,
enum populate_mode mode)
{
unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
pgd_t *pg_dir;
p4d_t *p4_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
if (!has_nx)
pgt_prot_zero &= ~_PAGE_NOEXEC;
pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PGDIR_SIZE) &&
end - address >= PGDIR_SIZE) {
pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
continue;
}
p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
pgd_populate(&init_mm, pg_dir, p4_dir);
}
p4_dir = p4d_offset(pg_dir, address);
if (p4d_none(*p4_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, P4D_SIZE) &&
end - address >= P4D_SIZE) {
p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
address = (address + P4D_SIZE) & P4D_MASK;
continue;
}
pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
p4d_populate(&init_mm, p4_dir, pu_dir);
}
pu_dir = pud_offset(p4_dir, address);
if (pud_none(*pu_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PUD_SIZE) &&
end - address >= PUD_SIZE) {
pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
address = (address + PUD_SIZE) & PUD_MASK;
continue;
}
pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PMD_SIZE) &&
end - address >= PMD_SIZE) {
pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
/* the first megabyte of 1:1 is mapped with 4k pages */
if (has_edat && address && end - address >= PMD_SIZE &&
mode != POPULATE_ZERO_SHADOW) {
void *page;
if (mode == POPULATE_ONE2ONE) {
page = (void *)address;
} else {
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = kasan_early_pte_alloc();
pmd_populate(&init_mm, pm_dir, pt_dir);
} else if (pmd_large(*pm_dir)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
if (pte_none(*pt_dir)) {
void *page;
switch (mode) {
case POPULATE_ONE2ONE:
page = (void *)address;
pte_val(*pt_dir) = __pa(page) | pgt_prot;
break;
case POPULATE_MAP:
page = kasan_early_alloc_pages(0);
memset(page, 0, PAGE_SIZE);
pte_val(*pt_dir) = __pa(page) | pgt_prot;
break;
case POPULATE_ZERO_SHADOW:
page = kasan_zero_page;
pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
break;
}
}
address += PAGE_SIZE;
}
}
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
unsigned long asce_bits;
asce_bits = asce_type | _ASCE_TABLE_LENGTH;
S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
S390_lowcore.user_asce = S390_lowcore.kernel_asce;
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}
static void __init kasan_enable_dat(void)
{
psw_t psw;
psw.mask = __extract_psw();
psw_bits(psw).dat = 1;
psw_bits(psw).as = PSW_BITS_AS_HOME;
__load_psw_mask(psw.mask);
}
static void __init kasan_early_detect_facilities(void)
{
__stfle(S390_lowcore.stfle_fac_list,
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
if (test_facility(8)) {
has_edat = true;
__ctl_set_bit(0, 23);
}
if (!noexec_disabled && test_facility(130)) {
has_nx = true;
__ctl_set_bit(0, 20);
}
}
static unsigned long __init get_mem_detect_end(void)
{
unsigned long start;
unsigned long end;
if (mem_detect.count) {
__get_mem_detect_block(mem_detect.count - 1, &start, &end);
return end;
}
return 0;
}
void __init kasan_early_init(void)
{
unsigned long untracked_mem_end;
unsigned long shadow_alloc_size;
unsigned long initrd_end;
unsigned long asce_type;
unsigned long memsize;
unsigned long vmax;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
pte_t pte_z;
pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);
kasan_early_detect_facilities();
if (!has_nx)
pgt_prot &= ~_PAGE_NOEXEC;
pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);
memsize = get_mem_detect_end();
if (!memsize)
kasan_early_panic("cannot detect physical memory size\n");
/* respect mem= cmdline parameter */
if (memory_end_set && memsize > memory_end)
memsize = memory_end;
memsize = min(memsize, KASAN_SHADOW_START);
if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
/* 4 level paging */
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
crst_table_init((unsigned long *)early_pg_dir,
_REGION2_ENTRY_EMPTY);
untracked_mem_end = vmax = _REGION1_SIZE;
asce_type = _ASCE_TYPE_REGION2;
} else {
/* 3 level paging */
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
crst_table_init((unsigned long *)early_pg_dir,
_REGION3_ENTRY_EMPTY);
untracked_mem_end = vmax = _REGION2_SIZE;
asce_type = _ASCE_TYPE_REGION3;
}
/* init kasan zero shadow */
crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);
shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
initrd_end =
round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
pgalloc_low = max(pgalloc_low, initrd_end);
}
if (pgalloc_low + shadow_alloc_size > memsize)
kasan_early_panic("out of memory during initialisation\n");
if (has_edat) {
segment_pos = round_down(memsize, _SEGMENT_SIZE);
segment_low = segment_pos - shadow_alloc_size;
pgalloc_pos = segment_low;
} else {
pgalloc_pos = memsize;
}
init_mm.pgd = early_pg_dir;
/*
* Current memory layout:
* +- 0 -------------+ +- shadow start -+
* | 1:1 ram mapping | /| 1/8 ram |
* +- end of ram ----+ / +----------------+
* | ... gap ... |/ | kasan |
* +- shadow start --+ | zero |
* | 1/8 addr space | | page |
* +- shadow end -+ | mapping |
* | ... gap ... |\ | (untracked) |
* +- modules vaddr -+ \ +----------------+
* | 2Gb | \| unmapped | allocated per module
* +-----------------+ +- shadow end ---+
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = vmax - MODULES_LEN;
kasan_early_vmemmap_populate(__sha(max_physmem_end),
__sha(untracked_mem_end),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos;
/* populate identity mapping */
kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
kasan_set_pgd(early_pg_dir, asce_type);
kasan_enable_dat();
/* enable kasan */
init_task.kasan_depth = 0;
memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
sclp_early_printk("KernelAddressSanitizer initialized\n");
}
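For reference, the __sha() shadow translation used above follows the generic KASAN scheme: one shadow byte tracks 1 << KASAN_SHADOW_SCALE_SHIFT (8) bytes of kernel address space, relocated by KASAN_SHADOW_OFFSET. A minimal sketch (example_mem_to_shadow is a hypothetical name for illustration):
static inline unsigned long example_mem_to_shadow(unsigned long addr)
{
/* scale the address down by 8 and shift it into the shadow area */
return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}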
void __init kasan_copy_shadow(pgd_t *pg_dir)
{
/*
* At this point we are still running on the early page tables set up in early_pg_dir,
* while swapper_pg_dir has just been initialized with identity mapping.
* Carry over shadow memory region from early_pg_dir to swapper_pg_dir.
*/
pgd_t *pg_dir_src;
pgd_t *pg_dir_dst;
p4d_t *p4_dir_src;
p4d_t *p4_dir_dst;
pud_t *pu_dir_src;
pud_t *pu_dir_dst;
pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
if (!p4d_folded(*p4_dir_src)) {
/* 4 level paging */
memcpy(p4_dir_dst, p4_dir_src,
(KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
return;
}
/* 3 level paging */
pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
memcpy(pu_dir_dst, pu_dir_src,
(KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}
void __init kasan_free_early_identity(void)
{
memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}


@ -89,10 +89,8 @@ static int __memcpy_real(void *dest, void *src, size_t count)
return rc;
}
/*
* Copy memory in real mode (kernel to kernel)
*/
int memcpy_real(void *dest, void *src, size_t count)
static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
unsigned long count)
{
int irqs_disabled, rc;
unsigned long flags;
@ -103,13 +101,30 @@ int memcpy_real(void *dest, void *src, size_t count)
irqs_disabled = arch_irqs_disabled_flags(flags);
if (!irqs_disabled)
trace_hardirqs_off();
rc = __memcpy_real(dest, src, count);
rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
if (!irqs_disabled)
trace_hardirqs_on();
__arch_local_irq_ssm(flags);
return rc;
}
/*
* Copy memory in real mode (kernel to kernel)
*/
int memcpy_real(void *dest, void *src, size_t count)
{
if (S390_lowcore.nodat_stack != 0)
return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
3, dest, src, count);
/*
* This is a really early memcpy_real call, the stacks are
* not set up yet. Just call _memcpy_real on the early boot
* stack.
*/
return _memcpy_real((unsigned long) dest, (unsigned long) src,
(unsigned long) count);
}
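A short usage sketch (hypothetical caller): since memcpy_real() copies with DAT disabled, the source pointer is interpreted as a real storage address, e.g. when inspecting the absolute lowcore at real address 0:
static int read_real_lowcore(void *buf, size_t count)
{
/* buf is an ordinary kernel buffer, the source is a real address */
return memcpy_real(buf, (void *) 0UL, count);
}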
/*
* Copy memory in absolute mode (kernel to kernel)
*/


@ -1,62 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2008, 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1
static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
start, start + size - 1);
memblock_add_range(&memblock.memory, start, size, 0, 0);
memblock_add_range(&memblock.physmem, start, size, 0, 0);
}
void __init detect_memory_memblock(void)
{
unsigned long memsize, rnmax, rzm, addr, size;
int type;
rzm = sclp.rzm;
rnmax = sclp.rnmax;
memsize = rzm * rnmax;
if (!rzm)
rzm = 1UL << 17;
max_physmem_end = memsize;
addr = 0;
/* keep memblock lists close to the kernel */
memblock_set_bottom_up(true);
do {
size = 0;
/* assume lowcore is writable */
type = addr ? tprot(addr) : CHUNK_READ_WRITE;
do {
size += rzm;
if (max_physmem_end && addr + size >= max_physmem_end)
break;
} while (type == tprot(addr + size));
if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
if (max_physmem_end && (addr + size > max_physmem_end))
size = max_physmem_end - addr;
memblock_physmem_add(addr, size);
}
addr += size;
} while (addr < max_physmem_end);
memblock_set_bottom_up(false);
if (!max_physmem_end)
max_physmem_end = memblock_end_of_DRAM();
memblock_dump_all();
}


@ -11,6 +11,7 @@
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/ptrace.h>
/* The purgatory is the code running between two kernels. Its main purpose
* is to verify that the next kernel was not corrupted after load and to
@ -88,8 +89,7 @@ ENTRY(purgatory_start)
.base_crash:
/* Setup stack */
larl %r15,purgatory_end
aghi %r15,-160
larl %r15,purgatory_end-STACK_FRAME_OVERHEAD
/* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called
* directly with a flag passed in %r2 whether the purgatory shall do


@ -73,6 +73,17 @@ config ZCRYPT
+ Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
+ Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
config ZCRYPT_MULTIDEVNODES
bool "Support for multiple zcrypt device nodes"
default y
depends on S390
depends on ZCRYPT
help
With this option enabled the zcrypt device driver can
provide multiple device nodes in /dev. Each device
node can be customized to limit access and narrow
down the use of the available crypto hardware.
config PKEY
tristate "Kernel API for protected key handling"
depends on S390


@ -3309,10 +3309,8 @@ dasd_exit(void)
dasd_proc_exit();
#endif
dasd_eer_exit();
if (dasd_page_cache != NULL) {
kmem_cache_destroy(dasd_page_cache);
dasd_page_cache = NULL;
}
kmem_cache_destroy(dasd_page_cache);
dasd_page_cache = NULL;
dasd_gendisk_exit();
dasd_devmap_exit();
if (dasd_debug_area != NULL) {


@ -11,6 +11,7 @@ endif
GCOV_PROFILE_sclp_early_core.o := n
KCOV_INSTRUMENT_sclp_early_core.o := n
UBSAN_SANITIZE_sclp_early_core.o := n
KASAN_SANITIZE_sclp_early_core.o := n
CFLAGS_sclp_early_core.o += -D__NO_FORTIFY


@ -58,22 +58,31 @@ struct mon_private {
static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
{
struct appldata_product_id id;
struct appldata_parameter_list *parm_list;
struct appldata_product_id *id;
int rc;
memcpy(id.prod_nr, "LNXAPPL", 7);
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
id.release_nr = myhdr->release;
id.mod_lvl = myhdr->mod_level;
rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
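/* id and parm_list are kmalloc'ed rather than kept on the stack: with
 * virtually mapped kernel stacks, on-stack buffers no longer have a
 * usable real address for the underlying DIAG call */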
id = kmalloc(sizeof(*id), GFP_KERNEL);
parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
rc = -ENOMEM;
if (!id || !parm_list)
goto out;
memcpy(id->prod_nr, "LNXAPPL", 7);
id->prod_fn = myhdr->applid;
id->record_nr = myhdr->record_num;
id->version_nr = myhdr->version;
id->release_nr = myhdr->release;
id->mod_lvl = myhdr->mod_level;
rc = appldata_asm(parm_list, id, fcn,
(void *) buffer, myhdr->datalen);
if (rc <= 0)
return rc;
goto out;
pr_err("Writing monitor data failed with rc=%i\n", rc);
if (rc == 5)
return -EPERM;
return -EINVAL;
rc = (rc == 5) ? -EPERM : -EINVAL;
out:
kfree(id);
kfree(parm_list);
return rc;
}
static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,


@ -63,6 +63,9 @@
typedef unsigned int sclp_cmdw_t;
#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_READ_SCP_INFO 0x00020001
#define SCLP_CMDW_READ_STORAGE_INFO 0x00040001
#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
@ -156,6 +159,54 @@ struct read_cpu_info_sccb {
u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
struct read_info_sccb {
struct sccb_header header; /* 0-7 */
u16 rnmax; /* 8-9 */
u8 rnsize; /* 10 */
u8 _pad_11[16 - 11]; /* 11-15 */
u16 ncpurl; /* 16-17 */
u16 cpuoff; /* 18-19 */
u8 _pad_20[24 - 20]; /* 20-23 */
u8 loadparm[8]; /* 24-31 */
u8 _pad_32[42 - 32]; /* 32-41 */
u8 fac42; /* 42 */
u8 fac43; /* 43 */
u8 _pad_44[48 - 44]; /* 44-47 */
u64 facilities; /* 48-55 */
u8 _pad_56[66 - 56]; /* 56-65 */
u8 fac66; /* 66 */
u8 _pad_67[76 - 67]; /* 67-75 */
u32 ibc; /* 76-79 */
u8 _pad80[84 - 80]; /* 80-83 */
u8 fac84; /* 84 */
u8 fac85; /* 85 */
u8 _pad_86[91 - 86]; /* 86-90 */
u8 fac91; /* 91 */
u8 _pad_92[98 - 92]; /* 92-97 */
u8 fac98; /* 98 */
u8 hamaxpow; /* 99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
u32 hsa_size; /* 112-115 */
u8 fac116; /* 116 */
u8 fac117; /* 117 */
u8 fac118; /* 118 */
u8 fac119; /* 119 */
u16 hcpua; /* 120-121 */
u8 _pad_122[124 - 122]; /* 122-123 */
u32 hmfai; /* 124-127 */
u8 _pad_128[4096 - 128]; /* 128-4095 */
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
struct sccb_header header;
u16 max_id;
u16 assigned;
u16 standby;
u16 :16;
u32 entries[0];
} __packed;
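/* Each nonzero entry carries a storage increment number in its upper
 * 16 bits; increment rn describes the block from (rn - 1) * rzm to
 * rn * rzm, as decoded in sclp_early_read_storage_info() */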
static inline void sclp_fill_core_info(struct sclp_core_info *info,
struct read_cpu_info_sccb *sccb)
{
@ -275,6 +326,7 @@ unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb);
int sclp_early_set_event_mask(struct init_sccb *sccb,
sccb_mask_t receive_mask,
sccb_mask_t send_mask);
int sclp_early_get_info(struct read_info_sccb *info);
/* useful inlines */


@ -460,15 +460,6 @@ static int sclp_mem_freeze(struct device *dev)
return -EPERM;
}
struct read_storage_sccb {
struct sccb_header header;
u16 max_id;
u16 assigned;
u16 standby;
u16 :16;
u32 entries[0];
} __packed;
static const struct dev_pm_ops sclp_mem_pm_ops = {
.freeze = sclp_mem_freeze,
};
@ -498,7 +489,7 @@ static int __init sclp_detect_standby_memory(void)
for (id = 0; id <= sclp_max_storage_id; id++) {
memset(sccb, 0, PAGE_SIZE);
sccb->header.length = PAGE_SIZE;
rc = sclp_sync_request(0x00040001 | id << 8, sccb);
rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {


@ -15,80 +15,17 @@
#include "sclp_sdias.h"
#include "sclp.h"
#define SCLP_CMDW_READ_SCP_INFO 0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
struct read_info_sccb {
struct sccb_header header; /* 0-7 */
u16 rnmax; /* 8-9 */
u8 rnsize; /* 10 */
u8 _pad_11[16 - 11]; /* 11-15 */
u16 ncpurl; /* 16-17 */
u16 cpuoff; /* 18-19 */
u8 _pad_20[24 - 20]; /* 20-23 */
u8 loadparm[8]; /* 24-31 */
u8 _pad_32[42 - 32]; /* 32-41 */
u8 fac42; /* 42 */
u8 fac43; /* 43 */
u8 _pad_44[48 - 44]; /* 44-47 */
u64 facilities; /* 48-55 */
u8 _pad_56[66 - 56]; /* 56-65 */
u8 fac66; /* 66 */
u8 _pad_67[76 - 67]; /* 67-75 */
u32 ibc; /* 76-79 */
u8 _pad80[84 - 80]; /* 80-83 */
u8 fac84; /* 84 */
u8 fac85; /* 85 */
u8 _pad_86[91 - 86]; /* 86-90 */
u8 fac91; /* 91 */
u8 _pad_92[98 - 92]; /* 92-97 */
u8 fac98; /* 98 */
u8 hamaxpow; /* 99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
u8 _pad_112[116 - 112]; /* 112-115 */
u8 fac116; /* 116 */
u8 fac117; /* 117 */
u8 fac118; /* 118 */
u8 fac119; /* 119 */
u16 hcpua; /* 120-121 */
u8 _pad_122[124 - 122]; /* 122-123 */
u32 hmfai; /* 124-127 */
u8 _pad_128[4096 - 128]; /* 128-4095 */
} __packed __aligned(PAGE_SIZE);
static struct sclp_ipl_info sclp_ipl_info;
struct sclp_info sclp;
EXPORT_SYMBOL(sclp);
static int __init sclp_early_read_info(struct read_info_sccb *sccb)
{
int i;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
for (i = 0; i < ARRAY_SIZE(commands); i++) {
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->header.function_code = 0x80;
sccb->header.control_mask[2] = 0x80;
if (sclp_early_cmd(commands[i], sccb))
break;
if (sccb->header.response_code == 0x10)
return 0;
if (sccb->header.response_code != 0x1f0)
break;
}
return -EIO;
}
static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
{
struct sclp_core_entry *cpue;
u16 boot_cpu_address, cpu;
if (sclp_early_read_info(sccb))
if (sclp_early_get_info(sccb))
return;
sclp.facilities = sccb->facilities;
@ -147,6 +84,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp_ipl_info.has_dump = 1;
memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
if (sccb->hsa_size)
sclp.hsa_size = (sccb->hsa_size - 1) * PAGE_SIZE;
sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
@ -189,61 +128,6 @@ int __init sclp_early_get_core_info(struct sclp_core_info *info)
return 0;
}
static long __init sclp_early_hsa_size_init(struct sdias_sccb *sccb)
{
memset(sccb, 0, sizeof(*sccb));
sccb->hdr.length = sizeof(*sccb);
sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb->evbuf.hdr.type = EVTYP_SDIAS;
sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb->evbuf.event_id = 4712;
sccb->evbuf.dbs = 1;
if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
return -EIO;
if (sccb->hdr.response_code != 0x20)
return -EIO;
if (sccb->evbuf.blk_cnt == 0)
return 0;
return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
static long __init sclp_early_hsa_copy_wait(struct sdias_sccb *sccb)
{
memset(sccb, 0, PAGE_SIZE);
sccb->hdr.length = PAGE_SIZE;
if (sclp_early_cmd(SCLP_CMDW_READ_EVENT_DATA, sccb))
return -EIO;
if ((sccb->hdr.response_code != 0x20) && (sccb->hdr.response_code != 0x220))
return -EIO;
if (sccb->evbuf.blk_cnt == 0)
return 0;
return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
static void __init sclp_early_hsa_size_detect(void *sccb)
{
unsigned long flags;
long size = -EIO;
raw_local_irq_save(flags);
if (sclp_early_set_event_mask(sccb, EVTYP_SDIAS_MASK, EVTYP_SDIAS_MASK))
goto out;
size = sclp_early_hsa_size_init(sccb);
/* First check for synchronous response (LPAR) */
if (size)
goto out_mask;
if (!(S390_lowcore.ext_params & 1))
sclp_early_wait_irq();
size = sclp_early_hsa_copy_wait(sccb);
out_mask:
sclp_early_set_event_mask(sccb, 0, 0);
out:
raw_local_irq_restore(flags);
if (size > 0)
sclp.hsa_size = size;
}
static void __init sclp_early_console_detect(struct init_sccb *sccb)
{
if (sccb->header.response_code != 0x20)
@ -262,7 +146,6 @@ void __init sclp_early_detect(void)
sclp_early_facilities_detect(sccb);
sclp_early_init_core_info(sccb);
sclp_early_hsa_size_detect(sccb);
/*
* Turn off SCLP event notifications. Also save remote masks in the


@ -9,9 +9,13 @@
#include <asm/lowcore.h>
#include <asm/ebcdic.h>
#include <asm/irq.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include "sclp.h"
#include "sclp_rw.h"
static struct read_info_sccb __bootdata(sclp_info_sccb);
static int __bootdata(sclp_info_sccb_valid);
char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
/*
@ -234,3 +238,115 @@ void sclp_early_printk_force(const char *str)
{
__sclp_early_printk(str, strlen(str), 1);
}
int __init sclp_early_read_info(void)
{
int i;
struct read_info_sccb *sccb = &sclp_info_sccb;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
for (i = 0; i < ARRAY_SIZE(commands); i++) {
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->header.function_code = 0x80;
sccb->header.control_mask[2] = 0x80;
if (sclp_early_cmd(commands[i], sccb))
break;
if (sccb->header.response_code == 0x10) {
sclp_info_sccb_valid = 1;
return 0;
}
if (sccb->header.response_code != 0x1f0)
break;
}
return -EIO;
}
int __init sclp_early_get_info(struct read_info_sccb *info)
{
if (!sclp_info_sccb_valid)
return -EIO;
*info = sclp_info_sccb;
return 0;
}
int __init sclp_early_get_memsize(unsigned long *mem)
{
unsigned long rnmax;
unsigned long rnsize;
struct read_info_sccb *sccb = &sclp_info_sccb;
if (!sclp_info_sccb_valid)
return -EIO;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rnsize = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rnsize <<= 20;
*mem = rnsize * rnmax;
return 0;
}
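/* Worked example with hypothetical values: rnsize = 1 means 1 MiB
 * storage increments (1 << 20); with rnmax = 4096 increments this
 * reports *mem = 4096 MiB = 4 GiB */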
int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
{
if (!sclp_info_sccb_valid)
return -EIO;
*hsa_size = 0;
if (sclp_info_sccb.hsa_size)
*hsa_size = (sclp_info_sccb.hsa_size - 1) * PAGE_SIZE;
return 0;
}
#define SCLP_STORAGE_INFO_FACILITY 0x0000400000000000UL
void __weak __init add_mem_detect_block(u64 start, u64 end) {}
int __init sclp_early_read_storage_info(void)
{
struct read_storage_sccb *sccb = (struct read_storage_sccb *)&sclp_early_sccb;
int rc, id, max_id = 0;
unsigned long rn, rzm;
sclp_cmdw_t command;
u16 sn;
if (!sclp_info_sccb_valid)
return -EIO;
if (!(sclp_info_sccb.facilities & SCLP_STORAGE_INFO_FACILITY))
return -EOPNOTSUPP;
rzm = sclp_info_sccb.rnsize ?: sclp_info_sccb.rnsize2;
rzm <<= 20;
for (id = 0; id <= max_id; id++) {
memset(sclp_early_sccb, 0, sizeof(sclp_early_sccb));
sccb->header.length = sizeof(sclp_early_sccb);
command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8);
rc = sclp_early_cmd(command, sccb);
if (rc)
goto fail;
max_id = sccb->max_id;
switch (sccb->header.response_code) {
case 0x0010:
for (sn = 0; sn < sccb->assigned; sn++) {
if (!sccb->entries[sn])
continue;
rn = sccb->entries[sn] >> 16;
add_mem_detect_block((rn - 1) * rzm, rn * rzm);
}
break;
case 0x0310:
case 0x0410:
break;
default:
goto fail;
}
}
return 0;
fail:
mem_detect.count = 0;
return -EIO;
}


@ -24,6 +24,7 @@
#define SCLP_ATYPE_PCI 2
#define SCLP_ERRNOTIFY_AQ_RESET 0
#define SCLP_ERRNOTIFY_AQ_REPAIR 1
#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
@ -111,9 +112,14 @@ static int sclp_pci_check_report(struct zpci_report_error_header *report)
if (report->version != 1)
return -EINVAL;
if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
switch (report->action) {
case SCLP_ERRNOTIFY_AQ_RESET:
case SCLP_ERRNOTIFY_AQ_REPAIR:
case SCLP_ERRNOTIFY_AQ_INFO_LOG:
break;
default:
return -EINVAL;
}
if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
return -EINVAL;


@ -971,7 +971,7 @@ tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
snprintf(exception, BUFSIZE, "Data degraded");
break;
case 0x03:
snprintf(exception, BUFSIZE, "Data degraded in partion %i",
snprintf(exception, BUFSIZE, "Data degraded in partition %i",
sense->fmt.f70.mp);
break;
case 0x04:


@ -153,7 +153,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
}
};
#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
#define MAXMINOR ARRAY_SIZE(sys_ser)
static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;


@ -608,6 +608,36 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);
static int __ccwgroupdev_check_busid(struct device *dev, void *id)
{
char *bus_id = id;
return (strcmp(bus_id, dev_name(dev)) == 0);
}
/**
* get_ccwgroupdev_by_busid() - obtain device from a bus id
* @gdrv: driver the device is owned by
* @bus_id: bus id of the device to be searched
*
* This function searches all devices owned by @gdrv for a device with a bus
* id matching @bus_id.
* Returns:
* If a match is found, the reference count of the found device is
* increased and the device is returned; else %NULL is returned.
*/
struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
char *bus_id)
{
struct device *dev;
dev = driver_find_device(&gdrv->driver, NULL, bus_id,
__ccwgroupdev_check_busid);
return dev ? to_ccwgroupdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwgroupdev_by_busid);
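A usage sketch (hypothetical driver code, example_gdrv is an assumed driver): the returned group device carries a reference which the caller has to drop with put_device() when done:
struct ccwgroup_device *gdev;
gdev = get_ccwgroupdev_by_busid(&example_gdrv, "0.0.0800");
if (gdev) {
/* ... operate on the group device ... */
put_device(&gdev->dev);
}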
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed


@ -595,19 +595,11 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
return 0;
}
static inline int contains_aobs(struct qdio_q *q)
{
return !q->is_input_q && q->u.out.use_cq;
}
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
unsigned char state = 0;
int j, b = start;
if (!contains_aobs(q))
return;
for (j = 0; j < count; ++j) {
get_buf_state(q, b, &state, 0);
if (state == SLSB_P_OUTPUT_PENDING) {
@ -618,8 +610,6 @@ static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
q->u.out.sbal_state[b].flags |=
QDIO_OUTBUF_STATE_FLAG_PENDING;
q->u.out.aobs[b] = NULL;
} else if (state == SLSB_P_OUTPUT_EMPTY) {
q->u.out.sbal_state[b].aob = NULL;
}
b = next_buf(b);
}
@ -638,7 +628,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
@ -666,10 +655,10 @@ static void qdio_kick_handler(struct qdio_q *q)
qperf_inc(q, outbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
start, count);
if (q->u.out.use_cq)
qdio_handle_aobs(q, start, count);
}
qdio_handle_aobs(q, start, count);
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);


@ -27,7 +27,6 @@ struct qaob *qdio_allocate_aob(void)
{
return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);
void qdio_release_aob(struct qaob *aob)
{


@ -10,7 +10,7 @@ zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
# pkey kernel module
pkey-objs := pkey_api.o


@ -65,12 +65,11 @@ static struct device *ap_root_device;
DEFINE_SPINLOCK(ap_list_lock);
LIST_HEAD(ap_card_list);
/* Default permissions (card and domain masking) */
static struct ap_perms {
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
} ap_perms;
static DEFINE_MUTEX(ap_perms_mutex);
/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);
static struct ap_config_info *ap_configuration;
static bool initialised;
@ -944,21 +943,9 @@ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
return 0;
}
/*
* process_mask_arg() - parse a bitmap string and clear/set the
* bits in the bitmap accordingly. The string may be given as
* absolute value, a hex string like "0x1F2E3D4C5B6A" simply
* overwriting the current content of the bitmap. Or as relative string
* like "+1-16,-32,-0x40,+128" where only single bits or ranges of
* bits are cleared or set. Distinction is done based on the very
* first character which may be '+' or '-' for the relative string
* and is otherwise assumed to be an absolute value string. If parsing
* fails a negative errno value is returned. All arguments and bitmaps
* are in big endian order.
*/
static int process_mask_arg(const char *str,
unsigned long *bitmap, int bits,
struct mutex *lock)
int ap_parse_mask_str(const char *str,
unsigned long *bitmap, int bits,
struct mutex *lock)
{
unsigned long *newmap, size;
int rc;
@ -989,6 +976,7 @@ static int process_mask_arg(const char *str,
kfree(newmap);
return rc;
}
EXPORT_SYMBOL(ap_parse_mask_str);
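A minimal usage sketch (hypothetical caller), following the mask string format described for process_mask_arg() above — absolute hex strings overwrite the bitmap, while strings starting with '+' or '-' set or clear single bits or ranges:
int rc;
/* enable adapters 0-15 and disable adapter 32 in the default apm mask */
rc = ap_parse_mask_str("+0-15,-32", ap_perms.apm, AP_DEVICES,
&ap_perms_mutex);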
/*
* AP bus attributes.
@ -1049,6 +1037,21 @@ static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
return snprintf(buf, PAGE_SIZE, "not supported\n");
return snprintf(buf, PAGE_SIZE,
"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
ap_configuration->apm[0], ap_configuration->apm[1],
ap_configuration->apm[2], ap_configuration->apm[3],
ap_configuration->apm[4], ap_configuration->apm[5],
ap_configuration->apm[6], ap_configuration->apm[7]);
}
static BUS_ATTR_RO(ap_adapter_mask);
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
@ -1161,7 +1164,7 @@ static ssize_t apmask_store(struct bus_type *bus, const char *buf,
{
int rc;
rc = process_mask_arg(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
rc = ap_parse_mask_str(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
if (rc)
return rc;
@ -1192,7 +1195,7 @@ static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
{
int rc;
rc = process_mask_arg(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
rc = ap_parse_mask_str(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
if (rc)
return rc;
@ -1207,6 +1210,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
&bus_attr_ap_control_domain_mask,
&bus_attr_ap_usage_domain_mask,
&bus_attr_ap_adapter_mask,
&bus_attr_config_time,
&bus_attr_poll_thread,
&bus_attr_ap_interrupts,
@ -1218,11 +1222,10 @@ static struct bus_attribute *const ap_bus_attrs[] = {
};
/**
* ap_select_domain(): Select an AP domain.
*
* Pick one of the 16 AP domains.
* ap_select_domain(): Select an AP domain if possible and we haven't
* already done so.
*/
static int ap_select_domain(void)
static void ap_select_domain(void)
{
int count, max_count, best_domain;
struct ap_queue_status status;
@ -1237,7 +1240,7 @@ static int ap_select_domain(void)
if (ap_domain_index >= 0) {
/* Domain has already been selected. */
spin_unlock_bh(&ap_domain_lock);
return 0;
return;
}
best_domain = -1;
max_count = 0;
@ -1264,11 +1267,8 @@ static int ap_select_domain(void)
if (best_domain >= 0) {
ap_domain_index = best_domain;
AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
spin_unlock_bh(&ap_domain_lock);
return 0;
}
spin_unlock_bh(&ap_domain_lock);
return -ENODEV;
}
/*
@ -1346,8 +1346,7 @@ static void ap_scan_bus(struct work_struct *unused)
AP_DBF(DBF_DEBUG, "%s running\n", __func__);
ap_query_configuration(ap_configuration);
if (ap_select_domain() != 0)
goto out;
ap_select_domain();
for (id = 0; id < AP_DEVICES; id++) {
/* check if device is registered */
@ -1467,12 +1466,11 @@ static void ap_scan_bus(struct work_struct *unused)
}
} /* end device loop */
if (defdomdevs < 1)
if (ap_domain_index >= 0 && defdomdevs < 1)
AP_DBF(DBF_INFO,
"no queue device with default domain %d available\n",
ap_domain_index);
out:
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
@ -1496,21 +1494,22 @@ static int __init ap_debug_init(void)
static void __init ap_perms_init(void)
{
/* all resources usable if no kernel parameter string given */
memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
/* apm kernel parameter string */
if (apm_str) {
memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
process_mask_arg(apm_str, ap_perms.apm, AP_DEVICES,
&ap_perms_mutex);
ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
&ap_perms_mutex);
}
/* aqm kernel parameter string */
if (aqm_str) {
memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
process_mask_arg(aqm_str, ap_perms.aqm, AP_DOMAINS,
&ap_perms_mutex);
ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
&ap_perms_mutex);
}
}
@ -1533,7 +1532,7 @@ static int __init ap_module_init(void)
return -ENODEV;
}
/* set up the AP permissions (ap and aq masks) */
/* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
/* Get AP configuration data if available */

Some files were not shown because too many files have changed in this diff.