s390 updates for the 5.14 merge window

- Rework inline asm to get rid of error-prone "register asm" constructs,
   which are problematic especially when code instrumentation is enabled. In
   particular, introduce and use a register pair union to allocate even/odd
   register pairs. Unfortunately this breaks compatibility with older clang
   compilers, and the minimum clang version for s390 has been raised to 13.
   https://lore.kernel.org/linux-next/CAK7LNARuSmPCEy-ak0erPrPTgZdGVypBROFhtw+=3spoGoYsyw@mail.gmail.com/
 
 - Fix gcc 11 warnings, which triggered various minor reworks all over
   the code.
 
 - Add zstd kernel image compression support.
 
 - Rework boot CPU lowcore handling.
 
 - De-duplicate and move kernel memory layout setup logic earlier.
 
 - A few fixes in preparation for FORTIFY_SOURCE performing compile-time
   and run-time field bounds checking for the mem* functions.
 
 - Remove broken and unused power management support leftovers in s390
   drivers.
 
 - Disable stack-protector for decompressor and purgatory to fix buildroot
   build.
 
 - Fix vt220 sclp console name to match the char device name.
 
 - Enable HAVE_IOREMAP_PROT and add zpci_set_irq()/zpci_clear_irq() in
   zPCI code.
 
 - Remove some implausible WARN_ON_ONCEs and remove arch-specific counter
   transaction callbacks in favour of the default transaction handling in
   perf code.
 
 - Extend/add new uevents for online/config/mode state changes of
   AP card/queue devices in zcrypt.
 
 - Minor entry and ccwgroup code improvements.
 
 - Various other small fixes and improvements all over the code.
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEE3QHqV+H2a8xAv27vjYWKoQLXFBgFAmDhuTEACgkQjYWKoQLX
 FBjVlggAgDFBkDjlyfvrm4xzmHi7BJMmhrTJIONsSz+3tcA4/u5kE+Hrdrqxm0Uh
 ZH4MXBxn4q4Fmoomhu5w5ZDe8o2ip0aN9fFNdsBoP8hurmQbL/IbdTnBETKMrKpV
 XpogU2G7p+2nQ0+9+o6PS/vWlZhI88NVh8dWyRd2+5/XdMycgLv2Qm7NpQoACVw1
 CbUvxP2PlpZ0wltLvNBKPg1xXMZa3GS0wbVUsS2jiWcr/3VzCqfTHenZJ/RadoE6
 axG99QXCbLDMsJgVQcXtlI8K6Z461fAwbNtWZWC+Uq7o5pYuUFW1dovMg9WWF+7T
 lFNqXyyNy5wwITRkvuzjlVTE8yzYYg==
 =ADZ4
 -----END PGP SIGNATURE-----

Merge tag 's390-5.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Rework inline asm to get rid of error-prone "register asm"
   constructs, which are problematic especially when code
   instrumentation is enabled.

   In particular, introduce and use a register pair union to allocate
   even/odd register pairs (a minimal sketch of the new pattern follows
   this summary). Unfortunately this breaks compatibility with older
   clang compilers, and the minimum clang version for s390 has been
   raised to 13.

     https://lore.kernel.org/linux-next/CAK7LNARuSmPCEy-ak0erPrPTgZdGVypBROFhtw+=3spoGoYsyw@mail.gmail.com/

 - Fix gcc 11 warnings, which triggered various minor reworks all over
   the code.

 - Add zstd kernel image compression support.

 - Rework boot CPU lowcore handling.

 - De-duplicate and move kernel memory layout setup logic earlier.

 - A few fixes in preparation for FORTIFY_SOURCE performing compile-time
   and run-time field bounds checking for the mem* functions.

 - Remove broken and unused power management support leftovers in s390
   drivers.

 - Disable stack-protector for decompressor and purgatory to fix
   buildroot build.

 - Fix vt220 sclp console name to match the char device name.

 - Enable HAVE_IOREMAP_PROT and add zpci_set_irq()/zpci_clear_irq() in
   zPCI code.

 - Remove some implausible WARN_ON_ONCEs and remove arch-specific
   counter transaction callbacks in favour of the default transaction
   handling in perf code.

 - Extend/add new uevents for online/config/mode state changes of AP
   card/queue devices in zcrypt.

 - Minor entry and ccwgroup code improvements.

 - Various other small fixes and improvements all over the code.
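
   A minimal sketch of the register pair pattern referenced in the first
   item above (an editor's illustration, not a literal excerpt from the
   series): instead of pinning values to fixed GPRs with "register asm",
   the even/odd halves are described by the new union register_pair from
   arch/s390/include/asm/types.h and the compiler picks the pair. The
   inline asm is adapted from the __flogr() conversion shown in the diff
   below; the helper name flogr_example() is made up for illustration.

       /* mirrors the union added to arch/s390/include/asm/types.h */
       union register_pair {
               unsigned __int128 pair;
               struct {
                       unsigned long even;
                       unsigned long odd;
               };
       };

       /*
        * Hypothetical helper: FLOGR operates on an even/odd register
        * pair.  Rather than forcing the input into a fixed register
        * with e.g. "register unsigned long bit asm("4")", the whole
        * pair is handed to the compiler as one 128-bit operand.
        */
       static inline unsigned long flogr_example(unsigned long word)
       {
               union register_pair rp;

               rp.even = word;
               asm volatile(
                       "       flogr   %[rp],%[rp]\n"
                       : [rp] "+d" (rp.pair) : : "cc");
               return rp.even; /* result lands in the even half */
       }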

* tag 's390-5.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (91 commits)
  s390/dasd: use register pair instead of register asm
  s390/qdio: get rid of register asm
  s390/ioasm: use symbolic names for asm operands
  s390/ioasm: get rid of register asm
  s390/cmf: get rid of register asm
  s390/lib,string: get rid of register asm
  s390/lib,uaccess: get rid of register asm
  s390/string: get rid of register asm
  s390/cmpxchg: use register pair instead of register asm
  s390/mm,pages-states: get rid of register asm
  s390/lib,xor: get rid of register asm
  s390/timex: get rid of register asm
  s390/hypfs: use register pair instead of register asm
  s390/zcrypt: Switch to flexible array member
  s390/speculation: Use statically initialized const for instructions
  virtio/s390: get rid of open-coded kvm hypercall
  s390/pci: add zpci_set_irq()/zpci_clear_irq()
  scripts/min-tool-version.sh: Raise minimum clang version to 13.0.0 for s390
  s390/ipl: use register pair instead of register asm
  s390/mem_detect: fix tprot() program check new psw handling
  ...
Commit 2bb919b62f by Linus Torvalds, 2021-07-04 12:17:38 -07:00
112 changed files with 1249 additions and 1852 deletions


@ -162,6 +162,7 @@ config S390
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO
select HAVE_IOREMAP_PROT if PCI
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
@ -170,6 +171,7 @@ config S390
select HAVE_KERNEL_LZO
select HAVE_KERNEL_UNCOMPRESSED
select HAVE_KERNEL_XZ
select HAVE_KERNEL_ZSTD
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
@ -852,7 +854,7 @@ config CMM_IUCV
config APPLDATA_BASE
def_bool n
prompt "Linux - VM Monitor Stream, base infrastructure"
depends on PROC_FS
depends on PROC_SYSCTL
help
This provides a kernel interface for creating and updating z/VM APPLDATA
monitor records. The monitor records are updated at certain time


@ -28,6 +28,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))


@ -68,7 +68,7 @@ void print_missing_facilities(void)
first = 1;
for (i = 0; i < ARRAY_SIZE(als); i++) {
val = ~S390_lowcore.stfle_fac_list[i] & als[i];
val = ~stfle_fac_list[i] & als[i];
for (j = 0; j < BITS_PER_LONG; j++) {
if (!(val & (1UL << (BITS_PER_LONG - 1 - j))))
continue;
@ -106,9 +106,9 @@ void verify_facilities(void)
{
int i;
__stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list));
__stfle(stfle_fac_list, ARRAY_SIZE(stfle_fac_list));
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i])
if ((stfle_fac_list[i] & als[i]) != als[i])
facility_mismatch();
}
}


@ -24,6 +24,7 @@ void __printf(1, 2) decompressor_printk(const char *fmt, ...);
extern const char kernel_version[];
extern unsigned long memory_limit;
extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
extern int kaslr_enabled;


@ -14,6 +14,7 @@ obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-all := $(obj-y) piggy.o syms.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += vmlinux.bin.zst
targets += info.bin syms.bin vmlinux.syms $(obj-all)
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
@ -33,7 +34,7 @@ $(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OB
quiet_cmd_dumpsyms = DUMPSYMS $<
define cmd_dumpsyms
$(NM) -n -S --format=bsd "$<" | $(PERL) -ne '/(\w+)\s+(\w+)\s+[tT]\s+(\w+)/ and printf "%x %x %s\0",hex $$1,hex $$2,$$3' > "$@"
$(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@"
endef
$(obj)/syms.bin: $(obj)/vmlinux.syms FORCE
@ -63,6 +64,7 @@ suffix-$(CONFIG_KERNEL_LZ4) := .lz4
suffix-$(CONFIG_KERNEL_LZMA) := .lzma
suffix-$(CONFIG_KERNEL_LZO) := .lzo
suffix-$(CONFIG_KERNEL_XZ) := .xz
suffix-$(CONFIG_KERNEL_ZSTD) := .zst
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
@ -76,6 +78,8 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzo)
$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,xzkern)
$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE
$(call if_changed,zstd22)
OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE


@ -28,8 +28,10 @@ extern char _end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#ifdef CONFIG_KERNEL_BZIP2
#define BOOT_HEAP_SIZE 0x400000
#elif CONFIG_KERNEL_ZSTD
#define BOOT_HEAP_SIZE 0x30000
#else
#define BOOT_HEAP_SIZE 0x10000
#endif
@ -61,6 +63,10 @@ static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
#include "../../../../lib/decompress_unxz.c"
#endif
#ifdef CONFIG_KERNEL_ZSTD
#include "../../../../lib/decompress_unzstd.c"
#endif
#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE)
unsigned long mem_safe_offset(void)


@ -401,6 +401,7 @@ SYM_CODE_END(startup_pgm_check_handler)
# Must be keept in sync with struct parmarea in setup.h
#
.org PARMAREA
SYM_DATA_START(parmarea)
.quad 0 # IPL_DEVICE
.quad 0 # INITRD_START
.quad 0 # INITRD_SIZE
@ -411,6 +412,8 @@ SYM_CODE_END(startup_pgm_check_handler)
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
.org PARMAREA+__PARMAREA_SIZE
SYM_DATA_END(parmarea)
.org EARLY_SCCB_OFFSET
.fill 4096


@ -12,39 +12,44 @@
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE;
int __bootdata(noexec_disabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
unsigned long memory_limit;
int vmalloc_size_set;
int kaslr_enabled;
static inline int __diag308(unsigned long subcode, void *addr)
{
register unsigned long _addr asm("0") = (unsigned long)addr;
register unsigned long _rc asm("1") = 0;
unsigned long reg1, reg2;
psw_t old = S390_lowcore.program_new_psw;
union register_pair r1;
psw_t old;
r1.even = (unsigned long) addr;
r1.odd = 0;
asm volatile(
" epsw %0,%1\n"
" st %0,%[psw_pgm]\n"
" st %1,%[psw_pgm]+4\n"
" larl %0,1f\n"
" stg %0,%[psw_pgm]+8\n"
" diag %[addr],%[subcode],0x308\n"
"1: nopr %%r7\n"
: "=&d" (reg1), "=&a" (reg2),
[psw_pgm] "=Q" (S390_lowcore.program_new_psw),
[addr] "+d" (_addr), "+d" (_rc)
: [subcode] "d" (subcode)
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
" diag %[r1],%[subcode],0x308\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
: [r1] "+&d" (r1.pair),
[reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
"+Q" (S390_lowcore.program_new_psw),
"=Q" (old)
: [subcode] "d" (subcode),
[psw_old] "a" (&old),
[psw_pgm] "a" (&S390_lowcore.program_new_psw)
: "cc", "memory");
S390_lowcore.program_new_psw = old;
return _rc;
return r1.odd;
}
void store_ipl_parmblock(void)
@ -165,12 +170,12 @@ static inline int has_ebcdic_char(const char *str)
void setup_boot_command_line(void)
{
COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
parmarea.command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
/* convert arch command line to ascii if necessary */
if (has_ebcdic_char(COMMAND_LINE))
EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
if (has_ebcdic_char(parmarea.command_line))
EBCASC(parmarea.command_line, ARCH_COMMAND_LINE_SIZE);
/* copy arch command line */
strcpy(early_command_line, strim(COMMAND_LINE));
strcpy(early_command_line, strim(parmarea.command_line));
/* append IPL PARM data to the boot command line */
if (!is_prot_virt_guest() && ipl_block_valid)
@ -180,9 +185,9 @@ void setup_boot_command_line(void)
static void modify_facility(unsigned long nr, bool clear)
{
if (clear)
__clear_facility(nr, S390_lowcore.stfle_fac_list);
__clear_facility(nr, stfle_fac_list);
else
__set_facility(nr, S390_lowcore.stfle_fac_list);
__set_facility(nr, stfle_fac_list);
}
static void check_cleared_facilities(void)
@ -191,7 +196,7 @@ static void check_cleared_facilities(void)
int i;
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) {
if ((stfle_fac_list[i] & als[i]) != als[i]) {
sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities();
break;


@ -64,30 +64,37 @@ void add_mem_detect_block(u64 start, u64 end)
static int __diag260(unsigned long rx1, unsigned long rx2)
{
register unsigned long _rx1 asm("2") = rx1;
register unsigned long _rx2 asm("3") = rx2;
register unsigned long _ry asm("4") = 0x10; /* storage configuration */
int rc = -1; /* fail */
unsigned long reg1, reg2;
psw_t old = S390_lowcore.program_new_psw;
unsigned long reg1, reg2, ry;
union register_pair rx;
psw_t old;
int rc;
rx.even = rx1;
rx.odd = rx2;
ry = 0x10; /* storage configuration */
rc = -1; /* fail */
asm volatile(
" epsw %0,%1\n"
" st %0,%[psw_pgm]\n"
" st %1,%[psw_pgm]+4\n"
" larl %0,1f\n"
" stg %0,%[psw_pgm]+8\n"
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
" diag %[rx],%[ry],0x260\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
"1:\n"
: "=&d" (reg1), "=&a" (reg2),
[psw_pgm] "=Q" (S390_lowcore.program_new_psw),
[rc] "+&d" (rc), [ry] "+d" (_ry)
: [rx] "d" (_rx1), "d" (_rx2)
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
: [reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[rc] "+&d" (rc),
[ry] "+&d" (ry),
"+Q" (S390_lowcore.program_new_psw),
"=Q" (old)
: [rx] "d" (rx.pair),
[psw_old] "a" (&old),
[psw_pgm] "a" (&S390_lowcore.program_new_psw)
: "cc", "memory");
S390_lowcore.program_new_psw = old;
return rc == 0 ? _ry : -1;
return rc == 0 ? ry : -1;
}
static int diag260(void)
@ -111,24 +118,30 @@ static int diag260(void)
static int tprot(unsigned long addr)
{
unsigned long pgm_addr;
unsigned long reg1, reg2;
int rc = -EFAULT;
psw_t old = S390_lowcore.program_new_psw;
psw_t old;
S390_lowcore.program_new_psw.mask = __extract_psw();
asm volatile(
" larl %[pgm_addr],1f\n"
" stg %[pgm_addr],%[psw_pgm_addr]\n"
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
" tprot 0(%[addr]),0\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
"1:\n"
: [pgm_addr] "=&d"(pgm_addr),
[psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
[rc] "+&d"(rc)
: [addr] "a"(addr)
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
: [reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[rc] "+&d" (rc),
"=Q" (S390_lowcore.program_new_psw.addr),
"=Q" (old)
: [psw_old] "a" (&old),
[psw_pgm] "a" (&S390_lowcore.program_new_psw),
[addr] "a" (addr)
: "cc", "memory");
S390_lowcore.program_new_psw = old;
return rc;
}


@ -5,6 +5,7 @@
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
@ -15,8 +16,17 @@
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
/*
* Some code and data needs to stay below 2 GB, even when the kernel would be
* relocated above 2 GB, because it has to use 31 bit addresses.
@ -169,6 +179,86 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#endif
}
static void setup_kernel_memory_layout(void)
{
bool vmalloc_size_verified = false;
unsigned long vmemmap_off;
unsigned long vspace_left;
unsigned long rte_size;
unsigned long pages;
unsigned long vmax;
pages = ident_map_size / PAGE_SIZE;
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
/* choose kernel address space layout: 4 or 3 levels. */
vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
if (IS_ENABLED(CONFIG_KASAN) ||
vmalloc_size > _REGION2_SIZE ||
vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
vmax = _REGION1_SIZE;
else
vmax = _REGION2_SIZE;
/* keep vmemmap_off aligned to a top level region table entry */
rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
MODULES_END = vmax;
if (is_prot_virt_host()) {
/*
* forcing modules and vmalloc area under the ultravisor
* secure storage limit, so that any vmalloc allocation
* we do could be used to back secure guest storage.
*/
adjust_to_uv_max(&MODULES_END);
}
#ifdef CONFIG_KASAN
if (MODULES_END < vmax) {
/* force vmalloc and modules below kasan shadow */
MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
} else {
/*
* leave vmalloc and modules above kasan shadow but make
* sure they don't overlap with it
*/
vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
vmalloc_size_verified = true;
vspace_left = KASAN_SHADOW_START;
}
#endif
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
if (vmalloc_size_verified) {
VMALLOC_START = VMALLOC_END - vmalloc_size;
} else {
vmemmap_off = round_up(ident_map_size, rte_size);
if (vmemmap_off + vmemmap_size > VMALLOC_END ||
vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
/*
* allow vmalloc area to occupy up to 1/2 of
* the rest virtual space left.
*/
vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
}
VMALLOC_START = VMALLOC_END - vmalloc_size;
vspace_left = VMALLOC_START;
}
pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
pages = SECTION_ALIGN_UP(pages);
vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
/* keep vmemmap left most starting from a fresh region table entry */
vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
/* take care that identity map is lower then vmemmap */
ident_map_size = min(ident_map_size, vmemmap_off);
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
vmemmap = (struct page *)vmemmap_off;
}
/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
@ -208,6 +298,7 @@ void startup_kernel(void)
parse_boot_command_line();
setup_ident_map_size(detect_memory());
setup_vmalloc_size();
setup_kernel_memory_layout();
random_lma = __kaslr_offset = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {


@ -44,3 +44,28 @@ void uv_query_info(void)
prot_virt_guest = 1;
#endif
}
#if IS_ENABLED(CONFIG_KVM)
static bool has_uv_sec_stor_limit(void)
{
/*
* keep these conditions in line with setup_uv()
*/
if (!is_prot_virt_host())
return false;
if (is_prot_virt_guest())
return false;
if (!test_facility(158))
return false;
return !!uv_info.max_sec_stor_addr;
}
void adjust_to_uv_max(unsigned long *vmax)
{
if (has_uv_sec_stor_limit())
*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
#endif


@ -25,14 +25,13 @@
static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd)
{
register unsigned long _data asm("2") = (unsigned long) data;
register unsigned long _rc asm("3");
register unsigned long _cmd asm("4") = cmd;
union register_pair r1 = { .even = (unsigned long)data, };
asm volatile("diag %1,%2,0x304\n"
: "=d" (_rc) : "d" (_data), "d" (_cmd) : "memory");
return _rc;
asm volatile("diag %[r1],%[r3],0x304\n"
: [r1] "+&d" (r1.pair)
: [r3] "d" (cmd)
: "memory");
return r1.odd;
}
static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd)


@ -299,13 +299,13 @@ static inline unsigned char __flogr(unsigned long word)
}
return bit;
} else {
register unsigned long bit asm("4") = word;
register unsigned long out asm("5");
union register_pair rp;
rp.even = word;
asm volatile(
" flogr %[bit],%[bit]\n"
: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
return bit;
" flogr %[rp],%[rp]\n"
: [rp] "+d" (rp.pair) : : "cc");
return rp.even;
}
}


@ -11,8 +11,7 @@ struct ccw_driver;
* @count: number of attached slave devices
* @dev: embedded device structure
* @cdev: variable number of slave devices, allocated as needed
* @ungroup_work: work to be done when a ccwgroup notifier has action
* type %BUS_NOTIFY_UNBIND_DRIVER
* @ungroup_work: used to ungroup the ccwgroup device
*/
struct ccwgroup_device {
enum {


@ -29,13 +29,15 @@
*/
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
register unsigned long reg2 asm("2") = (unsigned long) buff;
register unsigned long reg3 asm("3") = (unsigned long) len;
union register_pair rp = {
.even = (unsigned long) buff,
.odd = (unsigned long) len,
};
asm volatile(
"0: cksm %0,%1\n" /* do checksum on longs */
"0: cksm %[sum],%[rp]\n"
" jo 0b\n"
: "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
: [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
return sum;
}


@ -9,6 +9,7 @@
#include <linux/bitops.h>
#include <linux/genalloc.h>
#include <asm/types.h>
#include <asm/tpi.h>
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0


@ -169,32 +169,36 @@ static __always_inline unsigned long __cmpxchg(unsigned long address,
#define system_has_cmpxchg_double() 1
#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
register __typeof__(*(p1)) __old1 asm("2") = (o1); \
register __typeof__(*(p2)) __old2 asm("3") = (o2); \
register __typeof__(*(p1)) __new1 asm("4") = (n1); \
register __typeof__(*(p2)) __new2 asm("5") = (n2); \
int cc; \
asm volatile( \
" cdsg %[old],%[new],%[ptr]\n" \
" ipm %[cc]\n" \
" srl %[cc],28" \
: [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
: [new] "d" (__new1), "d" (__new2), \
[ptr] "Q" (*(p1)), "Q" (*(p2)) \
: "memory", "cc"); \
!cc; \
})
static __always_inline int __cmpxchg_double(unsigned long p1, unsigned long p2,
unsigned long o1, unsigned long o2,
unsigned long n1, unsigned long n2)
{
union register_pair old = { .even = o1, .odd = o2, };
union register_pair new = { .even = n1, .odd = n2, };
int cc;
asm volatile(
" cdsg %[old],%[new],%[ptr]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [old] "+&d" (old.pair)
: [new] "d" (new.pair),
[ptr] "QS" (*(unsigned long *)p1), "Q" (*(unsigned long *)p2)
: "memory", "cc");
return !cc;
}
#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
__typeof__(p1) __p1 = (p1); \
__typeof__(p2) __p2 = (p2); \
typeof(p1) __p1 = (p1); \
typeof(p2) __p2 = (p2); \
\
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
__cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \
__cmpxchg_double((unsigned long)__p1, (unsigned long)__p2, \
(unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2)); \
})
#endif /* __ASM_CMPXCHG_H */


@ -92,9 +92,8 @@ struct cpu_cf_events {
struct cpumf_ctr_info info;
atomic_t ctr_set[CPUMF_CTR_SET_MAX];
atomic64_t alert;
u64 state, tx_state;
u64 state;
unsigned int flags;
unsigned int txn_flags;
};
DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);


@ -13,7 +13,10 @@
#include <linux/preempt.h>
#include <asm/lowcore.h>
#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8)
extern u64 stfle_fac_list[16];
extern u64 alt_stfle_fac_list[16];
static inline void __set_facility(unsigned long nr, void *facilities)
{
@ -56,18 +59,20 @@ static inline int test_facility(unsigned long nr)
if (__test_facility(nr, &facilities_als))
return 1;
}
return __test_facility(nr, &S390_lowcore.stfle_fac_list);
return __test_facility(nr, &stfle_fac_list);
}
static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
{
register unsigned long reg0 asm("0") = size - 1;
unsigned long reg0 = size - 1;
asm volatile(
".insn s,0xb2b00000,0(%1)" /* stfle */
: "+d" (reg0)
: "a" (stfle_fac_list)
: "memory", "cc");
" lgr 0,%[reg0]\n"
" .insn s,0xb2b00000,%[list]\n" /* stfle */
" lgr %[reg0],0\n"
: [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list)
:
: "memory", "cc", "0");
return reg0;
}
@ -79,13 +84,15 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
static inline void __stfle(u64 *stfle_fac_list, int size)
{
unsigned long nr;
u32 stfl_fac_list;
asm volatile(
" stfl 0(0)\n"
: "=m" (S390_lowcore.stfl_fac_list));
stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(stfle_fac_list, &stfl_fac_list, 4);
nr = 4; /* bytes stored by stfl */
memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
if (S390_lowcore.stfl_fac_list & 0x01000000) {
if (stfl_fac_list & 0x01000000) {
/* More facility bits available with stfle */
nr = __stfle_asm(stfle_fac_list, size);
nr = min_t(unsigned long, (nr + 1) * 8, size * 8);


@ -32,45 +32,45 @@
})
/* set system mask. */
static inline notrace void __arch_local_irq_ssm(unsigned long flags)
static __always_inline void __arch_local_irq_ssm(unsigned long flags)
{
asm volatile("ssm %0" : : "Q" (flags) : "memory");
}
static inline notrace unsigned long arch_local_save_flags(void)
static __always_inline unsigned long arch_local_save_flags(void)
{
return __arch_local_irq_stnsm(0xff);
}
static inline notrace unsigned long arch_local_irq_save(void)
static __always_inline unsigned long arch_local_irq_save(void)
{
return __arch_local_irq_stnsm(0xfc);
}
static inline notrace void arch_local_irq_disable(void)
static __always_inline void arch_local_irq_disable(void)
{
arch_local_irq_save();
}
static inline notrace void arch_local_irq_enable(void)
static __always_inline void arch_local_irq_enable(void)
{
__arch_local_irq_stosm(0x03);
}
/* This only restores external and I/O interrupt state */
static inline notrace void arch_local_irq_restore(unsigned long flags)
static __always_inline void arch_local_irq_restore(unsigned long flags)
{
/* only disabled->disabled and disabled->enabled is valid */
if (flags & ARCH_IRQ_ENABLED)
arch_local_irq_enable();
}
static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
static __always_inline bool arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & ARCH_IRQ_ENABLED);
}
static inline notrace bool arch_irqs_disabled(void)
static __always_inline bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}


@ -16,7 +16,6 @@
extern void kasan_early_init(void);
extern void kasan_copy_shadow_mapping(void);
extern void kasan_free_early_identity(void);
extern unsigned long kasan_vmax;
/*
* Estimate kasan memory requirements, which it will reserve


@ -17,15 +17,23 @@
#define LC_ORDER 1
#define LC_PAGES 2
struct pgm_tdb {
u64 data[32];
};
struct lowcore {
__u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
__u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
__u32 ext_params; /* 0x0080 */
__u16 ext_cpu_addr; /* 0x0084 */
__u16 ext_int_code; /* 0x0086 */
__u16 svc_ilc; /* 0x0088 */
__u16 svc_code; /* 0x008a */
union {
struct {
__u16 ext_cpu_addr; /* 0x0084 */
__u16 ext_int_code; /* 0x0086 */
};
__u32 ext_int_code_addr;
};
__u32 svc_int_code; /* 0x0088 */
__u16 pgm_ilc; /* 0x008c */
__u16 pgm_code; /* 0x008e */
__u32 data_exc_code; /* 0x0090 */
@ -40,10 +48,15 @@ struct lowcore {
__u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
__u64 trans_exc_code; /* 0x00a8 */
__u64 monitor_code; /* 0x00b0 */
__u16 subchannel_id; /* 0x00b8 */
__u16 subchannel_nr; /* 0x00ba */
__u32 io_int_parm; /* 0x00bc */
__u32 io_int_word; /* 0x00c0 */
union {
struct {
__u16 subchannel_id; /* 0x00b8 */
__u16 subchannel_nr; /* 0x00ba */
__u32 io_int_parm; /* 0x00bc */
__u32 io_int_word; /* 0x00c0 */
};
struct tpi_info tpi_info; /* 0x00b8 */
};
__u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
__u32 stfl_fac_list; /* 0x00c8 */
__u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */
@ -154,12 +167,7 @@ struct lowcore {
__u64 vmcore_info; /* 0x0e0c */
__u8 pad_0x0e14[0x0e18-0x0e14]; /* 0x0e14 */
__u64 os_info; /* 0x0e18 */
__u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
/* Extended facility list */
__u64 stfle_fac_list[16]; /* 0x0f00 */
__u64 alt_stfle_fac_list[16]; /* 0x0f80 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
__u8 pad_0x0e20[0x11b0-0x0e20]; /* 0x0e20 */
/* Pointer to the machine check extended save area */
__u64 mcesad; /* 0x11b0 */
@ -185,7 +193,7 @@ struct lowcore {
__u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
struct pgm_tdb pgm_tdb; /* 0x1800 */
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
} __packed __aligned(8192);


@ -70,8 +70,8 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
@ -85,6 +85,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
#define switch_mm_irqs_off switch_mm_irqs_off
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned long flags;
local_irq_save(flags);
switch_mm_irqs_off(prev, next, tsk);
local_irq_restore(flags);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)


@ -55,13 +55,16 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
*/
static inline void copy_page(void *to, void *from)
{
register void *reg2 asm ("2") = to;
register unsigned long reg3 asm ("3") = 0x1000;
register void *reg4 asm ("4") = from;
register unsigned long reg5 asm ("5") = 0xb0001000;
union register_pair dst, src;
dst.even = (unsigned long) to;
dst.odd = 0x1000;
src.even = (unsigned long) from;
src.odd = 0xb0001000;
asm volatile(
" mvcl 2,4"
: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
" mvcl %[dst],%[src]"
: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
: : "memory", "cc");
}


@ -133,7 +133,8 @@ struct zpci_dev {
u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
u8 reserved : 3;
u8 irqs_registered : 1;
u8 reserved : 2;
unsigned int devfn; /* DEVFN part of the RID*/
struct mutex lock;
@ -271,9 +272,13 @@ struct zpci_dev *get_zdev_by_fid(u32);
int zpci_dma_init(void);
void zpci_dma_exit(void);
/* IRQ */
int __init zpci_irq_init(void);
void __init zpci_irq_exit(void);
int zpci_set_irq(struct zpci_dev *zdev);
int zpci_clear_irq(struct zpci_dev *zdev);
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);


@ -164,19 +164,20 @@
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
typeof(pcp1) o1__ = (o1), n1__ = (n1); \
typeof(pcp2) o2__ = (o2), n2__ = (n2); \
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
preempt_disable_notrace(); \
p1__ = raw_cpu_ptr(&(pcp1)); \
p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
preempt_enable_notrace(); \
ret__; \
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
\
preempt_disable_notrace(); \
p1__ = raw_cpu_ptr(&(pcp1)); \
p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double((unsigned long)p1__, (unsigned long)p2__, \
(unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2)); \
preempt_enable_notrace(); \
ret__; \
})
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double


@ -17,6 +17,7 @@
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
@ -84,16 +85,16 @@ extern unsigned long zero_page_mask;
* happen without trampolines and in addition the placement within a
* 2GB frame is branch prediction unit friendly.
*/
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
@ -553,27 +554,25 @@ static inline int mm_uses_skeys(struct mm_struct *mm)
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
register unsigned long reg2 asm("2") = old;
register unsigned long reg3 asm("3") = new;
union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
" csp %0,%3"
: "+d" (reg2), "+m" (*ptr)
: "d" (reg3), "d" (address)
" csp %[r1],%[address]"
: [r1] "+&d" (r1.pair), "+m" (*ptr)
: [address] "d" (address)
: "cc");
}
static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
register unsigned long reg2 asm("2") = old;
register unsigned long reg3 asm("3") = new;
union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
" .insn rre,0xb98a0000,%0,%3"
: "+d" (reg2), "+m" (*ptr)
: "d" (reg3), "d" (address)
" .insn rre,0xb98a0000,%[r1],%[address]"
: [r1] "+&d" (r1.pair), "+m" (*ptr)
: [address] "d" (address)
: "cc");
}
@ -587,14 +586,12 @@ static inline void crdte(unsigned long old, unsigned long new,
unsigned long table, unsigned long dtt,
unsigned long address, unsigned long asce)
{
register unsigned long reg2 asm("2") = old;
register unsigned long reg3 asm("3") = new;
register unsigned long reg4 asm("4") = table | dtt;
register unsigned long reg5 asm("5") = address;
union register_pair r1 = { .even = old, .odd = new, };
union register_pair r2 = { .even = table | dtt, .odd = address, };
asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
: "+d" (reg2)
: "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
: [r1] "+&d" (r1.pair)
: [r2] "d" (r2.pair), [asce] "a" (asce)
: "memory", "cc");
}
@ -861,6 +858,25 @@ static inline int pte_unused(pte_t pte)
return pte_val(pte) & _PAGE_UNUSED;
}
/*
* Extract the pgprot value from the given pte while at the same time making it
* usable for kernel address space mappings where fault driven dirty and
* young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
* must not be set.
*/
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
if (pte_write(pte))
pte_flags |= pgprot_val(PAGE_KERNEL);
else
pte_flags |= pgprot_val(PAGE_KERNEL_RO);
pte_flags |= pte_val(pte) & mio_wb_bit_mask;
return __pgprot(pte_flags);
}
/*
* pgd/pmd/pte modification functions
*/


@ -129,7 +129,7 @@ struct thread_struct {
struct runtime_instr_cb *ri_cb;
struct gs_cb *gs_cb; /* Current guarded storage cb */
struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
struct pgm_tdb trap_tdb; /* Transaction abort diagnose block */
/*
* Warning: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
@ -207,7 +207,7 @@ static __always_inline unsigned long current_stack_pointer(void)
return sp;
}
static __no_kasan_or_inline unsigned short stap(void)
static __always_inline unsigned short stap(void)
{
unsigned short cpu_address;
@ -246,7 +246,7 @@ static inline void __load_psw(psw_t psw)
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
static __always_inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;


@ -9,6 +9,7 @@
#include <linux/bits.h>
#include <uapi/asm/ptrace.h>
#include <asm/tpi.h>
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_SYSCALL_RESTART 1 /* restart the current system call */
@ -86,9 +87,14 @@ struct pt_regs
};
};
unsigned long orig_gpr2;
unsigned int int_code;
unsigned int int_parm;
unsigned long int_parm_long;
union {
struct {
unsigned int int_code;
unsigned int int_parm;
unsigned long int_parm_long;
};
struct tpi_info tpi_info;
};
unsigned long flags;
unsigned long cr1;
};


@ -78,6 +78,8 @@ struct parmarea {
char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
};
extern struct parmarea parmarea;
extern unsigned int zlib_dfltcc_support;
#define ZLIB_DFLTCC_DISABLED 0
#define ZLIB_DFLTCC_FULL 1
@ -87,7 +89,6 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled;
extern unsigned long ident_map_size;
extern unsigned long vmalloc_size;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;


@ -41,15 +41,17 @@
static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
u32 *status)
{
register unsigned long reg1 asm ("1") = parm;
union register_pair r1 = { .odd = parm, };
int cc;
asm volatile(
" sigp %1,%2,0(%3)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
*status = reg1;
" sigp %[r1],%[addr],0(%[order])\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [r1] "+&d" (r1.pair)
: [addr] "d" (addr), [order] "a" (order)
: "cc");
*status = r1.even;
return cc;
}


@ -107,16 +107,18 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
#ifdef __HAVE_ARCH_MEMCHR
static inline void *memchr(const void * s, int c, size_t n)
{
register int r0 asm("0") = (char) c;
const void *ret = s + n;
asm volatile(
"0: srst %0,%1\n"
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
" jo 0b\n"
" jl 1f\n"
" la %0,0\n"
" la %[ret],0\n"
"1:"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
return (void *) ret;
}
#endif
@ -124,13 +126,15 @@ static inline void *memchr(const void * s, int c, size_t n)
#ifdef __HAVE_ARCH_MEMSCAN
static inline void *memscan(void *s, int c, size_t n)
{
register int r0 asm("0") = (char) c;
const void *ret = s + n;
asm volatile(
"0: srst %0,%1\n"
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
" jo 0b\n"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
return (void *) ret;
}
#endif
@ -138,17 +142,18 @@ static inline void *memscan(void *s, int c, size_t n)
#ifdef __HAVE_ARCH_STRCAT
static inline char *strcat(char *dst, const char *src)
{
register int r0 asm("0") = 0;
unsigned long dummy;
unsigned long dummy = 0;
char *ret = dst;
asm volatile(
"0: srst %0,%1\n"
" lghi 0,0\n"
"0: srst %[dummy],%[dst]\n"
" jo 0b\n"
"1: mvst %0,%2\n"
"1: mvst %[dummy],%[src]\n"
" jo 1b"
: "=&a" (dummy), "+a" (dst), "+a" (src)
: "d" (r0), "0" (0) : "cc", "memory" );
: [dummy] "+&a" (dummy), [dst] "+&a" (dst), [src] "+&a" (src)
:
: "cc", "memory", "0");
return ret;
}
#endif
@ -156,14 +161,15 @@ static inline char *strcat(char *dst, const char *src)
#ifdef __HAVE_ARCH_STRCPY
static inline char *strcpy(char *dst, const char *src)
{
register int r0 asm("0") = 0;
char *ret = dst;
asm volatile(
"0: mvst %0,%1\n"
" lghi 0,0\n"
"0: mvst %[dst],%[src]\n"
" jo 0b"
: "+&a" (dst), "+&a" (src) : "d" (r0)
: "cc", "memory");
: [dst] "+&a" (dst), [src] "+&a" (src)
:
: "cc", "memory", "0");
return ret;
}
#endif
@ -171,28 +177,33 @@ static inline char *strcpy(char *dst, const char *src)
#if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s)
{
register unsigned long r0 asm("0") = 0;
unsigned long end = 0;
const char *tmp = s;
asm volatile(
"0: srst %0,%1\n"
" lghi 0,0\n"
"0: srst %[end],%[tmp]\n"
" jo 0b"
: "+d" (r0), "+a" (tmp) : : "cc", "memory");
return r0 - (unsigned long) s;
: [end] "+&a" (end), [tmp] "+&a" (tmp)
:
: "cc", "memory", "0");
return end - (unsigned long)s;
}
#endif
#ifdef __HAVE_ARCH_STRNLEN
static inline size_t strnlen(const char * s, size_t n)
{
register int r0 asm("0") = 0;
const char *tmp = s;
const char *end = s + n;
asm volatile(
"0: srst %0,%1\n"
" lghi 0,0\n"
"0: srst %[end],%[tmp]\n"
" jo 0b"
: "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory");
: [end] "+&a" (end), [tmp] "+&a" (tmp)
:
: "cc", "memory", "0");
return end - s;
}
#endif


@ -75,9 +75,12 @@ static inline void set_clock_comparator(__u64 time)
static inline void set_tod_programmable_field(u16 val)
{
register unsigned long reg0 asm("0") = val;
asm volatile("sckpf" : : "d" (reg0));
asm volatile(
" lgr 0,%[val]\n"
" sckpf\n"
:
: [val] "d" ((unsigned long)val)
: "0");
}
void clock_comparator_work(void);
@ -138,16 +141,19 @@ struct ptff_qui {
#define ptff(ptff_block, len, func) \
({ \
struct addrtype { char _[len]; }; \
register unsigned int reg0 asm("0") = func; \
register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
unsigned int reg0 = func; \
unsigned long reg1 = (unsigned long)(ptff_block); \
int rc; \
\
asm volatile( \
" .word 0x0104\n" \
" ipm %0\n" \
" srl %0,28\n" \
: "=d" (rc), "+m" (*(struct addrtype *) reg1) \
: "d" (reg0), "d" (reg1) : "cc"); \
" lgr 0,%[reg0]\n" \
" lgr 1,%[reg1]\n" \
" .insn e,0x0104\n" \
" ipm %[rc]\n" \
" srl %[rc],28\n" \
: [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \
: [reg0] "d" (reg0), [reg1] "d" (reg1) \
: "cc", "0", "1"); \
rc; \
})


@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_S390_TPI_H
#define _ASM_S390_TPI_H
#include <linux/types.h>
#include <uapi/asm/schid.h>
#ifndef __ASSEMBLY__
/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
struct tpi_info {
struct subchannel_id schid;
u32 intparm;
u32 adapter_IO:1;
u32 directed_irq:1;
u32 isc:3;
u32 :12;
u32 type:3;
u32 :12;
} __packed __aligned(4);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_TPI_H */


@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_S390_TYPES_H
#define _ASM_S390_TYPES_H
#include <uapi/asm/types.h>
#ifndef __ASSEMBLY__
union register_pair {
unsigned __int128 pair;
struct {
unsigned long even;
unsigned long odd;
};
};
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_TYPES_H */


@ -49,52 +49,51 @@ int __get_user_bad(void) __attribute__((noreturn));
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
#define __put_get_user_asm(to, from, size, spec) \
#define __put_get_user_asm(to, from, size, insn) \
({ \
register unsigned long __reg0 asm("0") = spec; \
int __rc; \
\
asm volatile( \
"0: mvcos %1,%3,%2\n" \
"1: xr %0,%0\n" \
insn " 0,%[spec]\n" \
"0: mvcos %[_to],%[_from],%[_size]\n" \
"1: xr %[rc],%[rc]\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: lhi %0,%5\n" \
"3: lhi %[rc],%[retval]\n" \
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
: "=d" (__rc), "+Q" (*(to)) \
: "d" (size), "Q" (*(from)), \
"d" (__reg0), "K" (-EFAULT) \
: "cc"); \
: [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
: [_size] "d" (size), [_from] "Q" (*(from)), \
[retval] "K" (-EFAULT), [spec] "K" (0x81UL) \
: "cc", "0"); \
__rc; \
})
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
unsigned long spec = 0x810000UL;
int rc;
switch (size) {
case 1:
rc = __put_get_user_asm((unsigned char __user *)ptr,
(unsigned char *)x,
size, spec);
size, "llilh");
break;
case 2:
rc = __put_get_user_asm((unsigned short __user *)ptr,
(unsigned short *)x,
size, spec);
size, "llilh");
break;
case 4:
rc = __put_get_user_asm((unsigned int __user *)ptr,
(unsigned int *)x,
size, spec);
size, "llilh");
break;
case 8:
rc = __put_get_user_asm((unsigned long __user *)ptr,
(unsigned long *)x,
size, spec);
size, "llilh");
break;
default:
__put_user_bad();
@ -105,29 +104,28 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
unsigned long spec = 0x81UL;
int rc;
switch (size) {
case 1:
rc = __put_get_user_asm((unsigned char *)x,
(unsigned char __user *)ptr,
size, spec);
size, "lghi");
break;
case 2:
rc = __put_get_user_asm((unsigned short *)x,
(unsigned short __user *)ptr,
size, spec);
size, "lghi");
break;
case 4:
rc = __put_get_user_asm((unsigned int *)x,
(unsigned int __user *)ptr,
size, spec);
size, "lghi");
break;
case 8:
rc = __put_get_user_asm((unsigned long *)x,
(unsigned long __user *)ptr,
size, spec);
size, "lghi");
break;
default:
__get_user_bad();


@ -4,6 +4,8 @@
#include <linux/types.h>
#ifndef __ASSEMBLY__
struct subchannel_id {
__u32 cssid : 8;
__u32 : 4;
@ -13,5 +15,6 @@ struct subchannel_id {
__u32 sch_no : 16;
} __attribute__ ((packed, aligned(4)));
#endif /* __ASSEMBLY__ */
#endif /* _UAPIASM_SCHID_H */


@ -76,8 +76,7 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
if (!__test_facility(a->facility,
S390_lowcore.alt_stfle_fac_list))
if (!__test_facility(a->facility, alt_stfle_fac_list))
continue;
if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {


@ -15,6 +15,7 @@
#include <asm/idle.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/setup.h>
#include <asm/stacktrace.h>
int main(void)
@ -58,8 +59,6 @@ int main(void)
OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
OFFSET(__LC_SVC_ILC, lowcore, svc_ilc);
OFFSET(__LC_SVC_INT_CODE, lowcore, svc_code);
OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
@ -77,8 +76,6 @@ int main(void)
OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr);
OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm);
OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word);
OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
@ -159,5 +156,7 @@ int main(void)
OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start);
OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len);
DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
/* sizeof kernel parameter area */
DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
return 0;
}


@ -26,33 +26,35 @@ static char cpcmd_buf[241];
static int diag8_noresponse(int cmdlen)
{
register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
register unsigned long reg3 asm ("3") = cmdlen;
asm volatile(
" diag %1,%0,0x8\n"
: "+d" (reg3) : "d" (reg2) : "cc");
return reg3;
" diag %[rx],%[ry],0x8\n"
: [ry] "+&d" (cmdlen)
: [rx] "d" ((addr_t) cpcmd_buf)
: "cc");
return cmdlen;
}
static int diag8_response(int cmdlen, char *response, int *rlen)
{
unsigned long _cmdlen = cmdlen | 0x40000000L;
unsigned long _rlen = *rlen;
register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
register unsigned long reg3 asm ("3") = (addr_t) response;
register unsigned long reg4 asm ("4") = _cmdlen;
register unsigned long reg5 asm ("5") = _rlen;
union register_pair rx, ry;
int cc;
rx.even = (addr_t) cpcmd_buf;
rx.odd = (addr_t) response;
ry.even = cmdlen | 0x40000000L;
ry.odd = *rlen;
asm volatile(
" diag %2,%0,0x8\n"
" brc 8,1f\n"
" agr %1,%4\n"
"1:\n"
: "+d" (reg4), "+d" (reg5)
: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
*rlen = reg5;
return reg4;
" diag %[rx],%[ry],0x8\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
: [rx] "d" (rx.pair)
: "cc");
if (cc)
*rlen += ry.odd;
else
*rlen = ry.odd;
return ry.even;
}
/*


@ -1418,7 +1418,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
else
except_str = "-";
caller = (unsigned long) entry->caller;
rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %pK ",
rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %px ",
area, sec, usec, level, except_str,
entry->cpu, (void *)caller);
return rc;


@ -141,16 +141,15 @@ EXPORT_SYMBOL(diag14);
static inline int __diag204(unsigned long *subcode, unsigned long size, void *addr)
{
register unsigned long _subcode asm("0") = *subcode;
register unsigned long _size asm("1") = size;
union register_pair rp = { .even = *subcode, .odd = size };
asm volatile(
" diag %2,%0,0x204\n"
" diag %[addr],%[rp],0x204\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
: "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
*subcode = _subcode;
return _size;
: [rp] "+&d" (rp.pair) : [addr] "d" (addr) : "memory");
*subcode = rp.even;
return rp.odd;
}
int diag204(unsigned long subcode, unsigned long size, void *addr)


@ -180,11 +180,9 @@ static noinline __init void setup_lowcore_early(void)
static noinline __init void setup_facility_list(void)
{
memcpy(S390_lowcore.alt_stfle_fac_list,
S390_lowcore.stfle_fac_list,
sizeof(S390_lowcore.alt_stfle_fac_list));
memcpy(alt_stfle_fac_list, stfle_fac_list, sizeof(alt_stfle_fac_list));
if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
__clear_facility(82, alt_stfle_fac_list);
}
static __init void detect_diag9c(void)


@ -129,6 +129,27 @@ _LPP_OFFSET = __LC_LPP
"jnz .+8; .long 0xb2e8d000", 82
.endm
#if IS_ENABLED(CONFIG_KVM)
/*
* The OUTSIDE macro jumps to the provided label in case the value
* in the provided register is outside of the provided range. The
* macro is useful for checking whether a PSW stored in a register
* pair points inside or outside of a block of instructions.
* @reg: register to check
* @start: start of the range
* @end: end of the range
* @outside_label: jump here if @reg is outside of [@start..@end)
*/
.macro OUTSIDE reg,start,end,outside_label
lgr %r14,\reg
larl %r13,\start
slgr %r14,%r13
lghi %r13,\end - \start
clgr %r14,%r13
jhe \outside_label
.endm
#endif
GEN_BR_THUNK %r14
GEN_BR_THUNK %r14,%r13
@ -214,7 +235,7 @@ ENTRY(sie64a)
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie_mcck/.Lcleanup_sie_int
# See also .Lcleanup_sie
.Lrewind_pad6:
nopr 7
.Lrewind_pad4:
@ -276,6 +297,7 @@ ENTRY(system_call)
xgr %r10,%r10
xgr %r11,%r11
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
lgr %r3,%r14
brasl %r14,__do_syscall
lctlg %c1,%c1,__LC_USER_ASCE
@ -318,12 +340,7 @@ ENTRY(pgm_check_handler)
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
lgr %r14,%r9
larl %r13,.Lsie_gmap
slgr %r14,%r13
lghi %r13,.Lsie_done - .Lsie_gmap
clgr %r14,%r13
jhe 1f
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
@ -392,13 +409,8 @@ ENTRY(\name)
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
#if IS_ENABLED(CONFIG_KVM)
lgr %r14,%r9
larl %r13,.Lsie_gmap
slgr %r14,%r13
lghi %r13,.Lsie_done - .Lsie_gmap
clgr %r14,%r13
jhe 0f
brasl %r14,.Lcleanup_sie_int
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
brasl %r14,.Lcleanup_sie
#endif
0: CHECK_STACK __LC_SAVE_AREA_ASYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@ -541,13 +553,10 @@ ENTRY(mcck_int_handler)
tmhh %r8,0x0001 # interrupting from user ?
jnz .Lmcck_user
#if IS_ENABLED(CONFIG_KVM)
lgr %r14,%r9
larl %r13,.Lsie_gmap
slgr %r14,%r13
lghi %r13,.Lsie_done - .Lsie_gmap
clgr %r14,%r13
jhe .Lmcck_stack
brasl %r14,.Lcleanup_sie_mcck
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
OUTSIDE %r9,.Lsie_entry,.Lsie_skip,5f
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
5: brasl %r14,.Lcleanup_sie
#endif
j .Lmcck_stack
.Lmcck_user:
@ -649,21 +658,13 @@ ENDPROC(stack_overflow)
#endif
#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_sie_mcck:
larl %r13,.Lsie_entry
slgr %r9,%r13
lghi %r13,.Lsie_skip - .Lsie_entry
clgr %r9,%r13
jhe .Lcleanup_sie_int
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
.Lcleanup_sie_int:
.Lcleanup_sie:
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_KERNEL_ASCE
larl %r9,sie_exit # skip forward to sie_exit
BR_EX %r14,%r13
#endif
.section .rodata, "a"
#define SYSCALL(esame,emu) .quad __s390x_ ## esame


@ -163,16 +163,18 @@ static bool reipl_ccw_clear;
static inline int __diag308(unsigned long subcode, void *addr)
{
register unsigned long _addr asm("0") = (unsigned long) addr;
register unsigned long _rc asm("1") = 0;
union register_pair r1;
r1.even = (unsigned long) addr;
r1.odd = 0;
asm volatile(
" diag %0,%2,0x308\n"
" diag %[r1],%[subcode],0x308\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
: "+d" (_addr), "+d" (_rc)
: "d" (subcode) : "cc", "memory");
return _rc;
: [r1] "+&d" (r1.pair)
: [subcode] "d" (subcode)
: "cc", "memory");
return r1.odd;
}
int diag308(unsigned long subcode, void *addr)


@ -146,8 +146,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
account_idle_time_irq();
do {
memcpy(&regs->int_code, &S390_lowcore.subchannel_id, 12);
if (S390_lowcore.io_int_word & BIT(31))
regs->tpi_info = S390_lowcore.tpi_info;
if (S390_lowcore.tpi_info.adapter_IO)
do_irq_async(regs, THIN_INTERRUPT);
else
do_irq_async(regs, IO_INTERRUPT);
@ -172,7 +172,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
if (user_mode(regs))
update_timer_sys();
memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
regs->int_code = S390_lowcore.ext_int_code_addr;
regs->int_parm = S390_lowcore.ext_params;
regs->int_parm_long = S390_lowcore.ext_params2;


@ -17,11 +17,11 @@ static int __init nobp_setup_early(char *str)
* The user explicitely requested nobp=1, enable it and
* disable the expoline support.
*/
__set_facility(82, S390_lowcore.alt_stfle_fac_list);
__set_facility(82, alt_stfle_fac_list);
if (IS_ENABLED(CONFIG_EXPOLINE))
nospec_disable = 1;
} else {
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
__clear_facility(82, alt_stfle_fac_list);
}
return 0;
}
@ -29,7 +29,7 @@ early_param("nobp", nobp_setup_early);
static int __init nospec_setup_early(char *str)
{
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
__clear_facility(82, alt_stfle_fac_list);
return 0;
}
early_param("nospec", nospec_setup_early);
@ -40,7 +40,7 @@ static int __init nospec_report(void)
pr_info("Spectre V2 mitigation: etokens\n");
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
if (__test_facility(82, alt_stfle_fac_list))
pr_info("Spectre V2 mitigation: limited branch prediction\n");
return 0;
}
@ -66,14 +66,14 @@ void __init nospec_auto_detect(void)
*/
if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1;
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
__clear_facility(82, alt_stfle_fac_list);
} else if (__is_defined(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
*/
nospec_disable = 0;
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
__clear_facility(82, alt_stfle_fac_list);
}
/*
* If the kernel has not been compiled with expolines the
@ -86,7 +86,7 @@ static int __init spectre_v2_setup_early(char *str)
{
if (str && !strncmp(str, "on", 2)) {
nospec_disable = 0;
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
__clear_facility(82, alt_stfle_fac_list);
}
if (str && !strncmp(str, "off", 3))
nospec_disable = 1;
@ -99,6 +99,7 @@ early_param("spectre_v2", spectre_v2_setup_early);
static void __init_or_module __nospec_revert(s32 *start, s32 *end)
{
enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
static const u8 branch[] = { 0x47, 0x00, 0x07, 0x00 };
u8 *instr, *thunk, *br;
u8 insnbuf[6];
s32 *epo;
@ -128,7 +129,7 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
continue;
memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
memcpy(insnbuf + 2, branch, sizeof(branch));
switch (type) {
case BRCL_EXPOLINE:
insnbuf[0] = br[0];


@ -17,7 +17,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
return sprintf(buf, "Mitigation: etokens\n");
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
if (__test_facility(82, alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");
return sprintf(buf, "Vulnerable\n");
}


@ -362,15 +362,9 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct hw_perf_event *hwc = &event->hw;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
if (!(hwc->state & PERF_HES_STOPPED))
return;
if (WARN_ON_ONCE(hwc->config == -1))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/* (Re-)enable and activate the counter set */
@ -413,15 +407,6 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
/* Check authorization for the counter set to which this
* counter belongs.
* For group events transaction, the authorization check is
* done in cpumf_pmu_commit_txn().
*/
if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
if (validate_ctr_auth(&event->hw))
return -ENOENT;
ctr_set_enable(&cpuhw->state, event->hw.config_base);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@ -449,78 +434,6 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
ctr_set_disable(&cpuhw->state, event->hw.config_base);
}
/*
* Start group events scheduling transaction.
* Set flags to perform a single test at commit time.
*
* We only support PERF_PMU_TXN_ADD transactions. Save the
* transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
* transactions.
*/
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
cpuhw->txn_flags = txn_flags;
if (txn_flags & ~PERF_PMU_TXN_ADD)
return;
perf_pmu_disable(pmu);
cpuhw->tx_state = cpuhw->state;
}
/*
* Stop and cancel a group events scheduling tranctions.
* Assumes cpumf_pmu_del() is called for each successful added
* cpumf_pmu_add() during the transaction.
*/
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
unsigned int txn_flags;
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
txn_flags = cpuhw->txn_flags;
cpuhw->txn_flags = 0;
if (txn_flags & ~PERF_PMU_TXN_ADD)
return;
WARN_ON(cpuhw->tx_state != cpuhw->state);
perf_pmu_enable(pmu);
}
/*
* Commit the group events scheduling transaction. On success, the
* transaction is closed. On error, the transaction is kept open
* until cpumf_pmu_cancel_txn() is called.
*/
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
u64 state;
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
cpuhw->txn_flags = 0;
return 0;
}
/* check if the updated state can be scheduled */
state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
state >>= CPUMF_LCCTL_ENABLE_SHIFT;
if ((state & cpuhw->info.auth_ctl) != state)
return -ENOENT;
cpuhw->txn_flags = 0;
perf_pmu_enable(pmu);
return 0;
}
/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
.task_ctx_nr = perf_sw_context,
@ -533,9 +446,6 @@ static struct pmu cpumf_pmu = {
.start = cpumf_pmu_start,
.stop = cpumf_pmu_stop,
.read = cpumf_pmu_read,
.start_txn = cpumf_pmu_start_txn,
.commit_txn = cpumf_pmu_commit_txn,
.cancel_txn = cpumf_pmu_cancel_txn,
};
static int __init cpumf_pmu_init(void)
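
Dropping the start_txn/commit_txn/cancel_txn callbacks works because the per-event authorization check now happens unconditionally in cpumf_pmu_add(), and perf core installs default transaction handling for PMUs that leave these callbacks NULL. Roughly what perf_pmu_register() does in that case (paraphrased from kernel/events/core.c for context; not part of this diff):

	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/* batch hardware accesses via pmu_disable/pmu_enable */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_txn;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}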


@ -30,7 +30,6 @@ DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
.alert = ATOMIC64_INIT(0),
.state = 0,
.flags = 0,
.txn_flags = 0,
};
/* Indicator whether the CPU-Measurement Counter Facility Support is ready */
static bool cpum_cf_initalized;


@ -103,11 +103,9 @@ EXPORT_SYMBOL(cpu_have_feature);
static void show_facilities(struct seq_file *m)
{
unsigned int bit;
long *facilities;
facilities = (long *)&S390_lowcore.stfle_fac_list;
seq_puts(m, "facilities :");
for_each_set_bit_inv(bit, facilities, MAX_FACILITY_BIT)
for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
seq_printf(m, " %d", bit);
seq_putc(m, '\n');
}


@ -975,10 +975,12 @@ static int s390_tdb_get(struct task_struct *target,
struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
size_t size;
if (!(regs->int_code & 0x200))
return -ENODATA;
return membuf_write(&to, target->thread.trap_tdb, 256);
size = sizeof(target->thread.trap_tdb.data);
return membuf_write(&to, target->thread.trap_tdb.data, size);
}
static int s390_tdb_set(struct task_struct *target,


@ -96,7 +96,6 @@ unsigned long int_hwcap = 0;
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
unsigned long __bootdata(vmalloc_size);
struct mem_detect_info __bootdata(mem_detect);
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
@ -108,6 +107,9 @@ unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@ -165,7 +167,7 @@ static void __init set_preferred_console(void)
else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
else if (CONSOLE_IS_VT220)
add_preferred_console("ttyS", 1, NULL);
add_preferred_console("ttysclp", 0, NULL);
else if (CONSOLE_IS_HVC)
add_preferred_console("hvc", 0, NULL);
}
@ -338,27 +340,6 @@ int __init arch_early_irq_init(void)
return 0;
}
static int __init stack_realloc(void)
{
unsigned long old, new;
old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
new = stack_alloc();
if (!new)
panic("Couldn't allocate async stack");
WRITE_ONCE(S390_lowcore.async_stack, new + STACK_INIT_OFFSET);
free_pages(old, THREAD_SIZE_ORDER);
old = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
new = stack_alloc();
if (!new)
panic("Couldn't allocate machine check stack");
WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
memblock_free_late(old, THREAD_SIZE);
return 0;
}
early_initcall(stack_realloc);
void __init arch_call_rest_init(void)
{
unsigned long stack;
@ -413,11 +394,6 @@ static void __init setup_lowcore_dat_off(void)
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
lc->preempt_count = S390_lowcore.preempt_count;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
sizeof(lc->stfle_fac_list));
memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
sizeof(lc->alt_stfle_fac_list));
nmi_alloc_boot_cpu(lc);
lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@ -568,53 +544,10 @@ static void __init setup_resources(void)
#endif
}
static void __init setup_ident_map_size(void)
static void __init setup_memory_end(void)
{
unsigned long vmax, tmp;
/* Choose kernel address space layout: 3 or 4 levels. */
tmp = ident_map_size / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
vmax = _REGION2_SIZE; /* 3-level kernel page table */
else
vmax = _REGION1_SIZE; /* 4-level kernel page table */
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
if (is_prot_virt_host())
adjust_to_uv_max(&MODULES_END);
#ifdef CONFIG_KASAN
vmax = _REGION1_SIZE;
MODULES_END = kasan_vmax;
#endif
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
VMALLOC_START = VMALLOC_END - vmalloc_size;
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
tmp = SECTION_ALIGN_UP(tmp);
tmp = VMALLOC_START - tmp * sizeof(struct page);
tmp &= ~((vmax >> 11) - 1); /* align to page table level */
tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
vmemmap = (struct page *) tmp;
/* Take care that ident_map_size <= vmemmap */
ident_map_size = min(ident_map_size, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
ident_map_size = min(ident_map_size, KASAN_SHADOW_START);
#endif
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
#ifdef CONFIG_KASAN
/* move vmemmap above kasan shadow only if stands in a way */
if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
(unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
memblock_remove(ident_map_size, ULONG_MAX);
max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
@ -652,30 +585,6 @@ static void __init reserve_above_ident_map(void)
memblock_reserve(ident_map_size, ULONG_MAX);
}
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
/* Forget all memory above the running kdump system */
memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
/* Forget all memory above the running kdump system */
memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}
/*
* Reserve memory for kdump kernel to be loaded with kexec
*/
@ -1141,7 +1050,6 @@ void __init setup_arch(char **cmdline_p)
/* Do some memory reservations *before* memory is added to memblock */
reserve_above_ident_map();
reserve_oldmem();
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
@ -1152,10 +1060,9 @@ void __init setup_arch(char **cmdline_p)
memblock_add_mem_detect_info();
free_mem_detect_info();
remove_oldmem();
setup_uv();
setup_ident_map_size();
setup_memory_end();
setup_memory();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();


@ -74,7 +74,6 @@ enum {
static DEFINE_PER_CPU(struct cpu *, cpu_device);
struct pcpu {
struct lowcore *lowcore; /* lowcore page(s) for the cpu */
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
signed char state; /* physical cpu state */
@ -194,20 +193,12 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (!pcpu->lowcore || !nodat_stack)
goto out;
} else {
nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
}
lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
async_stack = stack_alloc();
mcck_stack = stack_alloc();
if (!async_stack || !mcck_stack)
goto out_stack;
lc = pcpu->lowcore;
if (!lc || !nodat_stack || !async_stack || !mcck_stack)
goto out;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + STACK_INIT_OFFSET;
@ -220,45 +211,42 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
if (nmi_alloc_per_cpu(lc))
goto out_stack;
goto out;
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
out_stack:
out:
stack_free(mcck_stack);
stack_free(async_stack);
out:
if (pcpu != &pcpu_devices[0]) {
free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages((unsigned long) lc, LC_ORDER);
return -ENOMEM;
}
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
unsigned long async_stack, nodat_stack, mcck_stack, lowcore;
nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET;
lowcore = (unsigned long) pcpu->lowcore;
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
int cpu;
cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
async_stack = lc->async_stack - STACK_INIT_OFFSET;
mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
nmi_free_per_cpu(pcpu->lowcore);
lowcore_ptr[cpu] = NULL;
nmi_free_per_cpu(lc);
stack_free(async_stack);
stack_free(mcck_stack);
if (pcpu == &pcpu_devices[0])
return;
free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages(lowcore, LC_ORDER);
free_pages((unsigned long) lc, LC_ORDER);
}
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
struct lowcore *lc = pcpu->lowcore;
struct lowcore *lc = lowcore_ptr[cpu];
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
@ -275,17 +263,16 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->cregs_save_area[1] = lc->kernel_asce;
lc->cregs_save_area[7] = lc->user_asce;
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
sizeof(lc->stfle_fac_list));
memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
sizeof(lc->alt_stfle_fac_list));
arch_spin_lock_setup(cpu);
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
struct lowcore *lc = pcpu->lowcore;
struct lowcore *lc;
int cpu;
cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) tsk;
@ -301,8 +288,11 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
struct lowcore *lc = pcpu->lowcore;
struct lowcore *lc;
int cpu;
cpu = pcpu - pcpu_devices;
lc = lowcore_ptr[cpu];
lc->restart_stack = lc->nodat_stack;
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
@ -387,7 +377,7 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
struct lowcore *lc = pcpu_devices->lowcore;
struct lowcore *lc = lowcore_ptr[0];
if (pcpu_devices[0].address == stap())
lc = &S390_lowcore;
@ -600,18 +590,21 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
int smp_store_status(int cpu)
{
struct pcpu *pcpu = pcpu_devices + cpu;
struct lowcore *lc;
struct pcpu *pcpu;
unsigned long pa;
pa = __pa(&pcpu->lowcore->floating_pt_save_area);
pcpu = pcpu_devices + cpu;
lc = lowcore_ptr[cpu];
pa = __pa(&lc->floating_pt_save_area);
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
return 0;
pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
if (MACHINE_HAS_GS)
pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
pa |= lc->mcesad & MCESA_LC_MASK;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
@ -1011,7 +1004,6 @@ void __init smp_prepare_boot_cpu(void)
WARN_ON(!cpu_present(0) || !cpu_online(0));
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
@ -1237,3 +1229,54 @@ out:
return rc;
}
subsys_initcall(s390_smp_init);
static __always_inline void set_new_lowcore(struct lowcore *lc)
{
union register_pair dst, src;
u32 pfx;
src.even = (unsigned long) &S390_lowcore;
src.odd = sizeof(S390_lowcore);
dst.even = (unsigned long) lc;
dst.odd = sizeof(*lc);
pfx = (unsigned long) lc;
asm volatile(
" mvcl %[dst],%[src]\n"
" spx %[pfx]\n"
: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
: [pfx] "Q" (pfx)
: "memory", "cc");
}
static int __init smp_reinit_ipl_cpu(void)
{
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc, *lc_ipl;
unsigned long flags;
lc_ipl = lowcore_ptr[0];
lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
async_stack = stack_alloc();
mcck_stack = stack_alloc();
if (!lc || !nodat_stack || !async_stack || !mcck_stack)
panic("Couldn't allocate memory");
local_irq_save(flags);
local_mcck_disable();
set_new_lowcore(lc);
S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
lowcore_ptr[0] = lc;
local_mcck_enable();
local_irq_restore(flags);
free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));
return 0;
}
early_initcall(smp_reinit_ipl_cpu);
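
Conceptually, smp_reinit_ipl_cpu() swaps the statically prepared IPL CPU lowcore and stacks for regularly allocated ones once the allocators are up. set_new_lowcore() has to perform the copy and the prefix switch in a single asm statement so that no instrumented code can run in between; at C level the effect is roughly the following (hypothetical sketch only, not the kernel code; set_prefix() is the existing wrapper around the spx instruction):

	memcpy(lc, &S390_lowcore, sizeof(*lc));	/* clone the currently active lowcore */
	set_prefix((u32)(unsigned long)lc);	/* make the copy the active lowcore */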


@ -395,19 +395,18 @@ out:
static int sthyi(u64 vaddr, u64 *rc)
{
register u64 code asm("0") = 0;
register u64 addr asm("2") = vaddr;
register u64 rcode asm("3");
union register_pair r1 = { .even = 0, }; /* subcode */
union register_pair r2 = { .even = vaddr, };
int cc;
asm volatile(
".insn rre,0xB2560000,%[code],%[addr]\n"
".insn rre,0xB2560000,%[r1],%[r2]\n"
"ipm %[cc]\n"
"srl %[cc],28\n"
: [cc] "=d" (cc), "=d" (rcode)
: [code] "d" (code), [addr] "a" (addr)
: [cc] "=&d" (cc), [r2] "+&d" (r2.pair)
: [r1] "d" (r1.pair)
: "memory", "cc");
*rc = rcode;
*rc = r2.odd;
return cc;
}


@ -144,11 +144,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
{
add_random_kstack_offset();
enter_from_user_mode(regs);
memcpy(&regs->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long));
memcpy(&regs->int_code, &S390_lowcore.svc_ilc, sizeof(regs->int_code));
regs->psw = S390_lowcore.svc_old_psw;
regs->int_code = S390_lowcore.svc_int_code;
update_timer_sys();
local_irq_enable();


@ -25,19 +25,22 @@ int topology_max_mnest;
static inline int __stsi(void *sysinfo, int fc, int sel1, int sel2, int *lvl)
{
register int r0 asm("0") = (fc << 28) | sel1;
register int r1 asm("1") = sel2;
int r0 = (fc << 28) | sel1;
int rc = 0;
asm volatile(
" stsi 0(%3)\n"
" lr 0,%[r0]\n"
" lr 1,%[r1]\n"
" stsi 0(%[sysinfo])\n"
"0: jz 2f\n"
"1: lhi %1,%4\n"
"2:\n"
"1: lhi %[rc],%[retval]\n"
"2: lr %[r0],0\n"
EX_TABLE(0b, 1b)
: "+d" (r0), "+d" (rc)
: "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
: "cc", "memory");
: [r0] "+d" (r0), [rc] "+d" (rc)
: [r1] "d" (sel2),
[sysinfo] "a" (sysinfo),
[retval] "K" (-EOPNOTSUPP)
: "cc", "0", "1", "memory");
*lvl = ((unsigned int) r0) >> 28;
return rc;
}
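
The external stsi() wrapper around __stsi() is unchanged; as before, function code 0 with a NULL buffer queries the current configuration level, for example (illustrative usage, not part of this diff):

	int level = stsi(NULL, 0, 0, 0);	/* e.g. 2 = LPAR, 3 = VM guest */

	if (level < 1)
		pr_warn("stsi failed\n");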


@ -36,7 +36,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
unsigned long address;
if (regs->int_code & 0x200)
address = *(unsigned long *)(current->thread.trap_tdb + 24);
address = current->thread.trap_tdb.data[3];
else
address = regs->psw.addr;
return (void __user *) (address - (regs->int_code >> 16));
@ -318,7 +318,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
if (S390_lowcore.pgm_code & 0x0200) {
/* transaction abort */
memcpy(&current->thread.trap_tdb, &S390_lowcore.pgm_tdb, 256);
current->thread.trap_tdb = S390_lowcore.pgm_tdb;
}
if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {


@ -52,7 +52,7 @@ void __init setup_uv(void)
unsigned long uv_stor_base;
/*
* keep these conditions in line with kasan init code has_uv_sec_stor_limit()
* keep these conditions in line with has_uv_sec_stor_limit()
*/
if (!is_prot_virt_host())
return;
@ -91,12 +91,6 @@ fail:
prot_virt_host = 0;
}
void adjust_to_uv_max(unsigned long *vmax)
{
if (uv_info.max_sec_stor_addr)
*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
/*
* Requests the Ultravisor to pin the page in the shared state. This will
* cause an intercept when the guest attempts to unshare the pinned page.


@ -234,7 +234,7 @@ static unsigned long kvm_s390_fac_size(void)
BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
sizeof(S390_lowcore.stfle_fac_list));
sizeof(stfle_fac_list));
return SIZE_INTERNAL;
}
@ -1482,8 +1482,8 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
mach->ibc = sclp.ibc;
memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
sizeof(S390_lowcore.stfle_fac_list));
memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
sizeof(stfle_fac_list));
VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
kvm->arch.model.ibc,
kvm->arch.model.cpuid);
@ -2707,10 +2707,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
for (i = 0; i < kvm_s390_fac_size(); i++) {
kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
(kvm_s390_fac_base[i] |
kvm_s390_fac_ext[i]);
kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
kvm_s390_fac_base[i];
}
kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
@ -5079,7 +5079,7 @@ static int __init kvm_s390_init(void)
for (i = 0; i < 16; i++)
kvm_s390_fac_base[i] |=
S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
stfle_fac_list[i] & nonhyp_mask(i);
return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}


@ -18,23 +18,30 @@
*/
static inline char *__strend(const char *s)
{
register unsigned long r0 asm("0") = 0;
unsigned long e = 0;
asm volatile ("0: srst %0,%1\n"
" jo 0b"
: "+d" (r0), "+a" (s) : : "cc", "memory");
return (char *) r0;
asm volatile(
" lghi 0,0\n"
"0: srst %[e],%[s]\n"
" jo 0b\n"
: [e] "+&a" (e), [s] "+&a" (s)
:
: "cc", "memory", "0");
return (char *)e;
}
static inline char *__strnend(const char *s, size_t n)
{
register unsigned long r0 asm("0") = 0;
const char *p = s + n;
asm volatile ("0: srst %0,%1\n"
" jo 0b"
: "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
return (char *) p;
asm volatile(
" lghi 0,0\n"
"0: srst %[p],%[s]\n"
" jo 0b\n"
: [p] "+&d" (p), [s] "+&a" (s)
:
: "cc", "memory", "0");
return (char *)p;
}
/**
@ -76,13 +83,15 @@ EXPORT_SYMBOL(strnlen);
#ifdef __HAVE_ARCH_STRCPY
char *strcpy(char *dest, const char *src)
{
register int r0 asm("0") = 0;
char *ret = dest;
asm volatile ("0: mvst %0,%1\n"
" jo 0b"
: "+&a" (dest), "+&a" (src) : "d" (r0)
: "cc", "memory" );
asm volatile(
" lghi 0,0\n"
"0: mvst %[dest],%[src]\n"
" jo 0b\n"
: [dest] "+&a" (dest), [src] "+&a" (src)
:
: "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcpy);
@ -144,16 +153,18 @@ EXPORT_SYMBOL(strncpy);
#ifdef __HAVE_ARCH_STRCAT
char *strcat(char *dest, const char *src)
{
register int r0 asm("0") = 0;
unsigned long dummy;
unsigned long dummy = 0;
char *ret = dest;
asm volatile ("0: srst %0,%1\n"
" jo 0b\n"
"1: mvst %0,%2\n"
" jo 1b"
: "=&a" (dummy), "+a" (dest), "+a" (src)
: "d" (r0), "0" (0UL) : "cc", "memory" );
asm volatile(
" lghi 0,0\n"
"0: srst %[dummy],%[dest]\n"
" jo 0b\n"
"1: mvst %[dummy],%[src]\n"
" jo 1b\n"
: [dummy] "=&a" (dummy), [dest] "+&a" (dest), [src] "+&a" (src)
:
: "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcat);
@ -221,18 +232,20 @@ EXPORT_SYMBOL(strncat);
#ifdef __HAVE_ARCH_STRCMP
int strcmp(const char *s1, const char *s2)
{
register int r0 asm("0") = 0;
int ret = 0;
asm volatile ("0: clst %2,%3\n"
" jo 0b\n"
" je 1f\n"
" ic %0,0(%2)\n"
" ic %1,0(%3)\n"
" sr %0,%1\n"
"1:"
: "+d" (ret), "+d" (r0), "+a" (s1), "+a" (s2)
: : "cc", "memory");
asm volatile(
" lghi 0,0\n"
"0: clst %[s1],%[s2]\n"
" jo 0b\n"
" je 1f\n"
" ic %[ret],0(%[s1])\n"
" ic 0,0(%[s2])\n"
" sr %[ret],0\n"
"1:"
: [ret] "+&d" (ret), [s1] "+&a" (s1), [s2] "+&a" (s2)
:
: "cc", "memory", "0");
return ret;
}
EXPORT_SYMBOL(strcmp);
@ -261,18 +274,18 @@ EXPORT_SYMBOL(strrchr);
static inline int clcle(const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
{
register unsigned long r2 asm("2") = (unsigned long) s1;
register unsigned long r3 asm("3") = (unsigned long) l1;
register unsigned long r4 asm("4") = (unsigned long) s2;
register unsigned long r5 asm("5") = (unsigned long) l2;
union register_pair r1 = { .even = (unsigned long)s1, .odd = l1, };
union register_pair r3 = { .even = (unsigned long)s2, .odd = l2, };
int cc;
asm volatile ("0: clcle %1,%3,0\n"
" jo 0b\n"
" ipm %0\n"
" srl %0,28"
: "=&d" (cc), "+a" (r2), "+a" (r3),
"+a" (r4), "+a" (r5) : : "cc", "memory");
asm volatile(
"0: clcle %[r1],%[r3],0\n"
" jo 0b\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [r1] "+&d" (r1.pair), [r3] "+&d" (r3.pair)
:
: "cc", "memory");
return cc;
}
@ -315,15 +328,18 @@ EXPORT_SYMBOL(strstr);
#ifdef __HAVE_ARCH_MEMCHR
void *memchr(const void *s, int c, size_t n)
{
register int r0 asm("0") = (char) c;
const void *ret = s + n;
asm volatile ("0: srst %0,%1\n"
" jo 0b\n"
" jl 1f\n"
" la %0,0\n"
"1:"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
asm volatile(
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
" jo 0b\n"
" jl 1f\n"
" la %[ret],0\n"
"1:"
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
return (void *) ret;
}
EXPORT_SYMBOL(memchr);
@ -360,13 +376,16 @@ EXPORT_SYMBOL(memcmp);
#ifdef __HAVE_ARCH_MEMSCAN
void *memscan(void *s, int c, size_t n)
{
register int r0 asm("0") = (char) c;
const void *ret = s + n;
asm volatile ("0: srst %0,%1\n"
" jo 0b\n"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
asm volatile(
" lgr 0,%[c]\n"
"0: srst %[ret],%[s]\n"
" jo 0b\n"
: [ret] "+&a" (ret), [s] "+&a" (s)
: [c] "d" (c)
: "cc", "memory", "0");
return (void *)ret;
}
EXPORT_SYMBOL(memscan);
#endif
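
The string routine conversions above keep the algorithms intact and only move the register 0 setup (lghi 0,0 or lgr 0,%[c]) into the asm body while switching to symbolic operand names. As a reading aid, a plain C equivalent of what the SRST loop in __strend() computes (illustrative only, not kernel code):

static inline char *strend_equiv(const char *s)
{
	/* SRST with r0 == 0 scans forward for the NUL terminator and
	 * returns its address, i.e. s + strlen(s). */
	while (*s)
		s++;
	return (char *)s;
}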


@ -61,11 +61,11 @@ static inline int copy_with_mvcos(void)
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
{
register unsigned long reg0 asm("0") = 0x81UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
" lghi 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
@ -84,7 +84,8 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
: [spec] "K" (0x81UL)
: "cc", "memory", "0");
return size;
}
@ -133,11 +134,11 @@ EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
{
register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
" llilh 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
@ -156,7 +157,8 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
: [spec] "K" (0x81UL)
: "cc", "memory", "0");
return size;
}
@ -205,12 +207,12 @@ EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size)
{
register unsigned long reg0 asm("0") = 0x810081UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
/* FIXME: copy with reduced length. */
asm volatile(
" lgr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
" jz 2f\n"
"1: algr %0,%3\n"
@ -221,7 +223,8 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
"3: \n"
EX_TABLE(0b,3b)
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
: [spec] "d" (0x810081UL)
: "cc", "memory");
return size;
}
@ -266,11 +269,11 @@ EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
asm volatile(
" llilh 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
" jz 4f\n"
"1: algr %0,%2\n"
@ -288,7 +291,8 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
: "a" (empty_zero_page), [spec] "K" (0x81UL)
: "cc", "memory", "0");
return size;
}
@ -338,10 +342,10 @@ EXPORT_SYMBOL(__clear_user);
static inline unsigned long strnlen_user_srst(const char __user *src,
unsigned long size)
{
register unsigned long reg0 asm("0") = 0;
unsigned long tmp1, tmp2;
asm volatile(
" lghi 0,0\n"
" la %2,0(%1)\n"
" la %3,0(%0,%1)\n"
" slgr %0,%0\n"
@ -353,7 +357,8 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
"1: sacf 768\n"
EX_TABLE(0b,1b)
: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
:
: "cc", "memory", "0");
return size;
}


@ -91,9 +91,6 @@ static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
/* Get around a gcc oddity */
register unsigned long *reg7 asm ("7") = p5;
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
@ -122,7 +119,7 @@ static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" xc 0(1,%1),0(%5)\n"
"3:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
"+a" (reg7)
"+a" (p5)
: : "0", "1", "cc", "memory");
}


@ -13,7 +13,6 @@
#include <asm/setup.h>
#include <asm/uv.h>
unsigned long kasan_vmax;
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
@ -251,28 +250,9 @@ static void __init kasan_early_detect_facilities(void)
}
}
static bool __init has_uv_sec_stor_limit(void)
{
/*
* keep these conditions in line with setup_uv()
*/
if (!is_prot_virt_host())
return false;
if (is_prot_virt_guest())
return false;
if (!test_facility(158))
return false;
return !!uv_info.max_sec_stor_addr;
}
void __init kasan_early_init(void)
{
unsigned long untracked_mem_end;
unsigned long shadow_alloc_size;
unsigned long vmax_unlimited;
unsigned long initrd_end;
unsigned long memsize;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
@ -306,9 +286,6 @@ void __init kasan_early_init(void)
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);
untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
if (has_uv_sec_stor_limit())
kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
/* init kasan zero shadow */
crst_table_init((unsigned long *)kasan_early_shadow_p4d,
@ -375,18 +352,18 @@ void __init kasan_early_init(void)
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = kasan_vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */
kasan_early_pgtable_populate(__sha(untracked_mem_end), __sha(kasan_vmax),
kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
POPULATE_SHALLOW);
}
/* populate kasan shadow for untracked memory */
kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_mem_end),
kasan_early_pgtable_populate(__sha(ident_map_size),
IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
__sha(VMALLOC_START) :
__sha(MODULES_VADDR),
POPULATE_ZERO_SHADOW);
kasan_early_pgtable_populate(__sha(kasan_vmax), __sha(vmax_unlimited),
kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos;


@ -79,22 +79,21 @@ notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
register unsigned long _dest asm("2") = (unsigned long) dest;
register unsigned long _len1 asm("3") = (unsigned long) count;
register unsigned long _src asm("4") = (unsigned long) src;
register unsigned long _len2 asm("5") = (unsigned long) count;
union register_pair _dst, _src;
int rc = -EFAULT;
_dst.even = (unsigned long) dest;
_dst.odd = (unsigned long) count;
_src.even = (unsigned long) src;
_src.odd = (unsigned long) count;
asm volatile (
"0: mvcle %1,%2,0x0\n"
"0: mvcle %[dst],%[src],0\n"
"1: jo 0b\n"
" lhi %0,0x0\n"
" lhi %[rc],0\n"
"2:\n"
EX_TABLE(1b,2b)
: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
"+d" (_len2), "=m" (*((long *) dest))
: "m" (*((long *) src))
: "cc", "memory");
: [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
: : "cc", "memory");
return rc;
}


@ -31,17 +31,17 @@ __setup("cmma=", cmma);
static inline int cmma_test_essa(void)
{
register unsigned long tmp asm("0") = 0;
register int rc asm("1");
unsigned long tmp = 0;
int rc = -EOPNOTSUPP;
/* test ESSA_GET_STATE */
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,%2,0\n"
"0: la %0,0\n"
" .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
"0: la %[rc],0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=&d" (rc), "+&d" (tmp)
: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
: [cmd] "i" (ESSA_GET_STATE));
return rc;
}


@ -63,16 +63,15 @@ u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
register u64 __addr asm("2") = addr;
register u64 __range asm("3") = range;
union register_pair addr_range = {.even = addr, .odd = range};
u8 cc;
asm volatile (
" .insn rre,0xb9d30000,%[fn],%[addr]\n"
" .insn rre,0xb9d30000,%[fn],%[addr_range]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn)
: [addr] "d" (__addr), "d" (__range)
: [addr_range] "d" (addr_range.pair)
: "cc");
*status = fn >> 24 & 0xff;
return cc;
@ -113,21 +112,19 @@ int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
/* PCI Load */
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
union register_pair req_off = {.even = req, .odd = offset};
int cc = -ENXIO;
u64 __data;
asm volatile (
" .insn rre,0xb9d20000,%[data],%[req]\n"
" .insn rre,0xb9d20000,%[data],%[req_off]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
: "d" (__offset)
: "cc");
*status = __req >> 24 & 0xff;
: [cc] "+d" (cc), [data] "=d" (__data),
[req_off] "+&d" (req_off.pair) :: "cc");
*status = req_off.even >> 24 & 0xff;
*data = __data;
return cc;
}
@ -173,21 +170,19 @@ static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
register u64 addr asm("2") = ioaddr;
register u64 r3 asm("3") = len;
union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
int cc = -ENXIO;
u64 __data;
asm volatile (
" .insn rre,0xb9d60000,%[data],%[ioaddr]\n"
" .insn rre,0xb9d60000,%[data],%[ioaddr_len]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
: [ioaddr] "d" (addr)
: "cc");
*status = r3 >> 24 & 0xff;
: [cc] "+d" (cc), [data] "=d" (__data),
[ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
*status = ioaddr_len.odd >> 24 & 0xff;
*data = __data;
return cc;
}
@ -211,20 +206,19 @@ EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
union register_pair req_off = {.even = req, .odd = offset};
int cc = -ENXIO;
asm volatile (
" .insn rre,0xb9d00000,%[data],%[req]\n"
" .insn rre,0xb9d00000,%[data],%[req_off]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [req] "+d" (__req)
: "d" (__offset), [data] "d" (data)
: [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
: [data] "d" (data)
: "cc");
*status = __req >> 24 & 0xff;
*status = req_off.even >> 24 & 0xff;
return cc;
}
@ -257,20 +251,19 @@ static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
register u64 addr asm("2") = ioaddr;
register u64 r3 asm("3") = len;
union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
int cc = -ENXIO;
asm volatile (
" .insn rre,0xb9d40000,%[data],%[ioaddr]\n"
" .insn rre,0xb9d40000,%[data],%[ioaddr_len]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), "+d" (r3)
: [data] "d" (data), [ioaddr] "d" (addr)
: "cc");
*status = r3 >> 24 & 0xff;
: [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
: [data] "d" (data)
: "cc", "memory");
*status = ioaddr_len.odd >> 24 & 0xff;
return cc;
}


@ -35,7 +35,7 @@ static struct airq_iv *zpci_sbv;
*/
static struct airq_iv **zpci_ibv;
/* Modify PCI: Register adapter interruptions */
/* Modify PCI: Register floating adapter interruptions */
static int zpci_set_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
@ -53,7 +53,7 @@ static int zpci_set_airq(struct zpci_dev *zdev)
return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister adapter interruptions */
/* Modify PCI: Unregister floating adapter interruptions */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
@ -98,6 +98,38 @@ static int zpci_clear_directed_irq(struct zpci_dev *zdev)
return cc ? -EIO : 0;
}
/* Register adapter interruptions */
int zpci_set_irq(struct zpci_dev *zdev)
{
int rc;
if (irq_delivery == DIRECTED)
rc = zpci_set_directed_irq(zdev);
else
rc = zpci_set_airq(zdev);
if (!rc)
zdev->irqs_registered = 1;
return rc;
}
/* Clear adapter interruptions */
int zpci_clear_irq(struct zpci_dev *zdev)
{
int rc;
if (irq_delivery == DIRECTED)
rc = zpci_clear_directed_irq(zdev);
else
rc = zpci_clear_airq(zdev);
if (!rc)
zdev->irqs_registered = 0;
return rc;
}
static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
bool force)
{
@ -311,10 +343,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
zdev->msi_first_bit = bit;
zdev->msi_nr_irqs = msi_vecs;
if (irq_delivery == DIRECTED)
rc = zpci_set_directed_irq(zdev);
else
rc = zpci_set_airq(zdev);
rc = zpci_set_irq(zdev);
if (rc)
return rc;
@ -328,10 +357,7 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
int rc;
/* Disable interrupts */
if (irq_delivery == DIRECTED)
rc = zpci_clear_directed_irq(zdev);
else
rc = zpci_clear_airq(zdev);
rc = zpci_clear_irq(zdev);
if (rc)
return;
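
zpci_set_irq() and zpci_clear_irq() give the rest of the zPCI code one entry point that hides whether directed or floating (adapter) interrupt delivery is in use, and record the result in zdev->irqs_registered. A hypothetical caller that re-registers interrupts after a function reset could then look like this (illustrative usage, not code from this series):

	if (zdev->irqs_registered) {
		rc = zpci_clear_irq(zdev);
		if (!rc)
			rc = zpci_set_irq(zdev);
	}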


@ -49,8 +49,7 @@ static inline int __pcistg_mio_inuser(
void __iomem *ioaddr, const void __user *src,
u64 ulen, u8 *status)
{
register u64 addr asm("2") = (u64 __force) ioaddr;
register u64 len asm("3") = ulen;
union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
int cc = -ENXIO;
u64 val = 0;
u64 cnt = ulen;
@ -68,7 +67,7 @@ static inline int __pcistg_mio_inuser(
" aghi %[src],1\n"
" ogr %[val],%[tmp]\n"
" brctg %[cnt],0b\n"
"1: .insn rre,0xb9d40000,%[val],%[ioaddr]\n"
"1: .insn rre,0xb9d40000,%[val],%[ioaddr_len]\n"
"2: ipm %[cc]\n"
" srl %[cc],28\n"
"3: sacf 768\n"
@ -76,10 +75,9 @@ static inline int __pcistg_mio_inuser(
:
[src] "+a" (src), [cnt] "+d" (cnt),
[val] "+d" (val), [tmp] "=d" (tmp),
[len] "+d" (len), [cc] "+d" (cc),
[ioaddr] "+a" (addr)
[cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
:: "cc", "memory");
*status = len >> 24 & 0xff;
*status = ioaddr_len.odd >> 24 & 0xff;
/* did we read everything from user memory? */
if (!cc && cnt != 0)
@ -195,8 +193,7 @@ static inline int __pcilg_mio_inuser(
void __user *dst, const void __iomem *ioaddr,
u64 ulen, u8 *status)
{
register u64 addr asm("2") = (u64 __force) ioaddr;
register u64 len asm("3") = ulen;
union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
u64 cnt = ulen;
int shift = ulen * 8;
int cc = -ENXIO;
@ -209,7 +206,7 @@ static inline int __pcilg_mio_inuser(
*/
asm volatile (
" sacf 256\n"
"0: .insn rre,0xb9d60000,%[val],%[ioaddr]\n"
"0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
"1: ipm %[cc]\n"
" srl %[cc],28\n"
" ltr %[cc],%[cc]\n"
@ -222,18 +219,17 @@ static inline int __pcilg_mio_inuser(
"4: sacf 768\n"
EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
:
[cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
[ioaddr_len] "+&d" (ioaddr_len.pair),
[cc] "+d" (cc), [val] "=d" (val),
[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
[shift] "+d" (shift)
:
[ioaddr] "a" (addr)
: "cc", "memory");
:: "cc", "memory");
/* did we write everything to the user space buffer? */
if (!cc && cnt != 0)
cc = -EFAULT;
*status = len >> 24 & 0xff;
*status = ioaddr_len.odd >> 24 & 0xff;
return cc;
}


@ -24,6 +24,7 @@ KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
KBUILD_CFLAGS += -fno-stack-protector
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))


@ -69,25 +69,24 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
* resulting condition code and DIAG return code. */
static inline int __dia250(void *iob, int cmd)
{
register unsigned long reg2 asm ("2") = (unsigned long) iob;
union register_pair rx = { .even = (unsigned long)iob, };
typedef union {
struct dasd_diag_init_io init_io;
struct dasd_diag_rw_io rw_io;
} addr_type;
int rc;
int cc;
rc = 3;
cc = 3;
asm volatile(
" diag 2,%2,0x250\n"
"0: ipm %0\n"
" srl %0,28\n"
" or %0,3\n"
" diag %[rx],%[cmd],0x250\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc), "=m" (*(addr_type *) iob)
: "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob)
: "3", "cc");
return rc;
: [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob)
: [cmd] "d" (cmd)
: "cc");
return cc | rx.odd;
}
static inline int dia250(void *iob, int cmd)


@ -17,7 +17,6 @@
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/dax.h>
@ -983,95 +982,12 @@ dcssblk_check_params(void)
}
}
/*
* Suspend / Resume
*/
static int dcssblk_freeze(struct device *dev)
{
struct dcssblk_dev_info *dev_info;
int rc = 0;
list_for_each_entry(dev_info, &dcssblk_devices, lh) {
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
case SEG_TYPE_ER:
case SEG_TYPE_SC:
if (!dev_info->is_shared)
rc = -EINVAL;
break;
default:
rc = -EINVAL;
break;
}
if (rc)
break;
}
if (rc)
pr_err("Suspending the system failed because DCSS device %s "
"is writable\n",
dev_info->segment_name);
return rc;
}
static int dcssblk_restore(struct device *dev)
{
struct dcssblk_dev_info *dev_info;
struct segment_info *entry;
unsigned long start, end;
int rc = 0;
list_for_each_entry(dev_info, &dcssblk_devices, lh) {
list_for_each_entry(entry, &dev_info->seg_list, lh) {
segment_unload(entry->segment_name);
rc = segment_load(entry->segment_name, SEGMENT_SHARED,
&start, &end);
if (rc < 0) {
// TODO in_use check ?
segment_warning(rc, entry->segment_name);
goto out_panic;
}
if (start != entry->start || end != entry->end) {
pr_err("The address range of DCSS %s changed "
"while the system was suspended\n",
entry->segment_name);
goto out_panic;
}
}
}
return 0;
out_panic:
panic("fatal dcssblk resume error\n");
}
static int dcssblk_thaw(struct device *dev)
{
return 0;
}
static const struct dev_pm_ops dcssblk_pm_ops = {
.freeze = dcssblk_freeze,
.thaw = dcssblk_thaw,
.restore = dcssblk_restore,
};
static struct platform_driver dcssblk_pdrv = {
.driver = {
.name = "dcssblk",
.pm = &dcssblk_pm_ops,
},
};
static struct platform_device *dcssblk_pdev;
/*
* The init/exit functions.
*/
static void __exit
dcssblk_exit(void)
{
platform_device_unregister(dcssblk_pdev);
platform_driver_unregister(&dcssblk_pdrv);
root_device_unregister(dcssblk_root_dev);
unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
}
@ -1081,22 +997,9 @@ dcssblk_init(void)
{
int rc;
rc = platform_driver_register(&dcssblk_pdrv);
if (rc)
return rc;
dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL,
0);
if (IS_ERR(dcssblk_pdev)) {
rc = PTR_ERR(dcssblk_pdev);
goto out_pdrv;
}
dcssblk_root_dev = root_device_register("dcssblk");
if (IS_ERR(dcssblk_root_dev)) {
rc = PTR_ERR(dcssblk_root_dev);
goto out_pdev;
}
if (IS_ERR(dcssblk_root_dev))
return PTR_ERR(dcssblk_root_dev);
rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
if (rc)
goto out_root;
@ -1114,10 +1017,7 @@ dcssblk_init(void)
out_root:
root_device_unregister(dcssblk_root_dev);
out_pdev:
platform_device_unregister(dcssblk_pdev);
out_pdrv:
platform_driver_unregister(&dcssblk_pdrv);
return rc;
}


@ -39,8 +39,6 @@
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
@ -140,7 +138,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
/*
* Check if xpram is available.
*/
static int xpram_present(void)
static int __init xpram_present(void)
{
unsigned long mem_page;
int rc;
@ -156,7 +154,7 @@ static int xpram_present(void)
/*
* Return index of the last available xpram page.
*/
static unsigned long xpram_highest_page_index(void)
static unsigned long __init xpram_highest_page_index(void)
{
unsigned int page_index, add_bit;
unsigned long mem_page;
@ -383,42 +381,6 @@ out:
return rc;
}
/*
* Resume failed: Print error message and call panic.
*/
static void xpram_resume_error(const char *message)
{
pr_err("Resuming the system failed: %s\n", message);
panic("xpram resume error\n");
}
/*
* Check if xpram setup changed between suspend and resume.
*/
static int xpram_restore(struct device *dev)
{
if (!xpram_pages)
return 0;
if (xpram_present() != 0)
xpram_resume_error("xpram disappeared");
if (xpram_pages != xpram_highest_page_index() + 1)
xpram_resume_error("Size of xpram changed");
return 0;
}
static const struct dev_pm_ops xpram_pm_ops = {
.restore = xpram_restore,
};
static struct platform_driver xpram_pdrv = {
.driver = {
.name = XPRAM_NAME,
.pm = &xpram_pm_ops,
},
};
static struct platform_device *xpram_pdev;
/*
* Finally, the init/exit functions.
*/
@ -430,8 +392,6 @@ static void __exit xpram_exit(void)
blk_cleanup_disk(xpram_disks[i]);
}
unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
platform_device_unregister(xpram_pdev);
platform_driver_unregister(&xpram_pdrv);
}
static int __init xpram_init(void)
@ -449,24 +409,7 @@ static int __init xpram_init(void)
rc = xpram_setup_sizes(xpram_pages);
if (rc)
return rc;
rc = platform_driver_register(&xpram_pdrv);
if (rc)
return rc;
xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
if (IS_ERR(xpram_pdev)) {
rc = PTR_ERR(xpram_pdev);
goto fail_platform_driver_unregister;
}
rc = xpram_setup_blkdev();
if (rc)
goto fail_platform_device_unregister;
return 0;
fail_platform_device_unregister:
platform_device_unregister(xpram_pdev);
fail_platform_driver_unregister:
platform_driver_unregister(&xpram_pdrv);
return rc;
return xpram_setup_blkdev();
}
module_init(xpram_init);


@ -21,7 +21,6 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
#include <linux/uaccess.h>
@ -79,8 +78,6 @@ static u8 user_data_sever[16] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
static struct device *monreader_device;
/******************************************************************************
* helper functions *
*****************************************************************************/
@ -319,7 +316,6 @@ static int mon_open(struct inode *inode, struct file *filp)
goto out_path;
}
filp->private_data = monpriv;
dev_set_drvdata(monreader_device, monpriv);
return nonseekable_open(inode, filp);
out_path:
@ -354,7 +350,6 @@ static int mon_close(struct inode *inode, struct file *filp)
atomic_set(&monpriv->msglim_count, 0);
monpriv->write_index = 0;
monpriv->read_index = 0;
dev_set_drvdata(monreader_device, NULL);
for (i = 0; i < MON_MSGLIM; i++)
kfree(monpriv->msg_array[i]);
@ -456,94 +451,6 @@ static struct miscdevice mon_dev = {
.minor = MISC_DYNAMIC_MINOR,
};
/******************************************************************************
* suspend / resume *
*****************************************************************************/
static int monreader_freeze(struct device *dev)
{
struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
return 0;
if (monpriv->path) {
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
rc);
iucv_path_free(monpriv->path);
}
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
atomic_set(&monpriv->read_ready, 0);
atomic_set(&monpriv->msglim_count, 0);
monpriv->write_index = 0;
monpriv->read_index = 0;
monpriv->path = NULL;
return 0;
}
static int monreader_thaw(struct device *dev)
{
struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
return 0;
rc = -ENOMEM;
monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
if (!monpriv->path)
goto out;
rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) {
pr_err("Connecting to the z/VM *MONITOR system service "
"failed with rc=%i\n", rc);
goto out_path;
}
wait_event(mon_conn_wait_queue,
atomic_read(&monpriv->iucv_connected) ||
atomic_read(&monpriv->iucv_severed));
if (atomic_read(&monpriv->iucv_severed))
goto out_path;
return 0;
out_path:
rc = -EIO;
iucv_path_free(monpriv->path);
monpriv->path = NULL;
out:
atomic_set(&monpriv->iucv_severed, 1);
return rc;
}
static int monreader_restore(struct device *dev)
{
int rc;
segment_unload(mon_dcss_name);
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
panic("fatal monreader resume error: no monitor dcss\n");
}
return monreader_thaw(dev);
}
static const struct dev_pm_ops monreader_pm_ops = {
.freeze = monreader_freeze,
.thaw = monreader_thaw,
.restore = monreader_restore,
};
static struct device_driver monreader_driver = {
.name = "monreader",
.bus = &iucv_bus,
.pm = &monreader_pm_ops,
};
/******************************************************************************
* module init/exit *
*****************************************************************************/
@ -567,36 +474,16 @@ static int __init mon_init(void)
return rc;
}
rc = driver_register(&monreader_driver);
if (rc)
goto out_iucv;
monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!monreader_device) {
rc = -ENOMEM;
goto out_driver;
}
dev_set_name(monreader_device, "monreader-dev");
monreader_device->bus = &iucv_bus;
monreader_device->parent = iucv_root;
monreader_device->driver = &monreader_driver;
monreader_device->release = (void (*)(struct device *))kfree;
rc = device_register(monreader_device);
if (rc) {
put_device(monreader_device);
goto out_driver;
}
rc = segment_type(mon_dcss_name);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
goto out_device;
goto out_iucv;
}
if (rc != SEG_TYPE_SC) {
pr_err("The specified *MONITOR DCSS %s does not have the "
"required type SC\n", mon_dcss_name);
rc = -EINVAL;
goto out_device;
goto out_iucv;
}
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
@ -604,7 +491,7 @@ static int __init mon_init(void)
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
rc = -EINVAL;
goto out_device;
goto out_iucv;
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
@ -619,10 +506,6 @@ static int __init mon_init(void)
out:
segment_unload(mon_dcss_name);
out_device:
device_unregister(monreader_device);
out_driver:
driver_unregister(&monreader_driver);
out_iucv:
iucv_unregister(&monreader_iucv_handler, 1);
return rc;
@ -632,8 +515,6 @@ static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
misc_deregister(&mon_dev);
device_unregister(monreader_device);
driver_unregister(&monreader_driver);
iucv_unregister(&monreader_iucv_handler, 1);
return;
}


@ -20,7 +20,6 @@
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/ebcdic.h>
@ -40,10 +39,7 @@ struct mon_buf {
char *data;
};
static LIST_HEAD(mon_priv_list);
struct mon_private {
struct list_head priv_list;
struct list_head list;
struct monwrite_hdr hdr;
size_t hdr_to_read;
@ -199,7 +195,6 @@ static int monwrite_open(struct inode *inode, struct file *filp)
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
list_add_tail(&monpriv->priv_list, &mon_priv_list);
return nonseekable_open(inode, filp);
}
@ -217,7 +212,6 @@ static int monwrite_close(struct inode *inode, struct file *filp)
kfree(entry->data);
kfree(entry);
}
list_del(&monpriv->priv_list);
kfree(monpriv);
return 0;
}
@ -293,106 +287,24 @@ static struct miscdevice mon_dev = {
.minor = MISC_DYNAMIC_MINOR,
};
/*
* suspend/resume
*/
static int monwriter_freeze(struct device *dev)
{
struct mon_private *monpriv;
struct mon_buf *monbuf;
list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
list_for_each_entry(monbuf, &monpriv->list, list) {
if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_STOP_REC);
}
}
return 0;
}
static int monwriter_restore(struct device *dev)
{
struct mon_private *monpriv;
struct mon_buf *monbuf;
list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
list_for_each_entry(monbuf, &monpriv->list, list) {
if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_START_INTERVAL_REC);
if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_START_CONFIG_REC);
}
}
return 0;
}
static int monwriter_thaw(struct device *dev)
{
return monwriter_restore(dev);
}
static const struct dev_pm_ops monwriter_pm_ops = {
.freeze = monwriter_freeze,
.thaw = monwriter_thaw,
.restore = monwriter_restore,
};
static struct platform_driver monwriter_pdrv = {
.driver = {
.name = "monwriter",
.pm = &monwriter_pm_ops,
},
};
static struct platform_device *monwriter_pdev;
/*
* module init/exit
*/
static int __init mon_init(void)
{
int rc;
if (!MACHINE_IS_VM)
return -ENODEV;
rc = platform_driver_register(&monwriter_pdrv);
if (rc)
return rc;
monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
0);
if (IS_ERR(monwriter_pdev)) {
rc = PTR_ERR(monwriter_pdev);
goto out_driver;
}
/*
* misc_register() has to be the last action in module_init(), because
* file operations will be available right after this.
*/
rc = misc_register(&mon_dev);
if (rc)
goto out_device;
return 0;
out_device:
platform_device_unregister(monwriter_pdev);
out_driver:
platform_driver_unregister(&monwriter_pdrv);
return rc;
return misc_register(&mon_dev);
}
static void __exit mon_exit(void)
{
misc_deregister(&mon_dev);
platform_device_unregister(monwriter_pdev);
platform_driver_unregister(&monwriter_pdrv);
}
module_init(mon_init);


@@ -18,8 +18,6 @@
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
@@ -49,9 +47,6 @@ static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;
/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);
/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
@@ -59,11 +54,6 @@ int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;
static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
complete(&sclp_request_queue_flushed);
}
static int __init sclp_setup_console_pages(char *str)
{
int pages, rc;
@@ -88,8 +78,6 @@ static int __init sclp_setup_console_drop(char *str)
__setup("sclp_con_drop=", sclp_setup_console_drop);
static struct sclp_req sclp_suspend_req;
/* Timer for request retries. */
static struct timer_list sclp_request_timer;
@@ -123,12 +111,6 @@ static volatile enum sclp_mask_state_t {
sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
sclp_suspend_state_running,
sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;
/* Maximum retry counts */
#define SCLP_INIT_RETRY 3
#define SCLP_MASK_RETRY 3
@@ -314,8 +296,6 @@ sclp_process_queue(void)
del_timer(&sclp_request_timer);
while (!list_empty(&sclp_req_queue)) {
req = list_entry(sclp_req_queue.next, struct sclp_req, list);
if (!req->sccb)
goto do_post;
rc = __sclp_start_request(req);
if (rc == 0)
break;
@@ -327,7 +307,6 @@ sclp_process_queue(void)
sclp_request_timeout_normal);
break;
}
do_post:
/* Post-processing for aborted request */
list_del(&req->list);
if (req->callback) {
@@ -341,10 +320,8 @@ do_post:
static int __sclp_can_add_request(struct sclp_req *req)
{
if (req == &sclp_suspend_req || req == &sclp_init_req)
if (req == &sclp_init_req)
return 1;
if (sclp_suspend_state != sclp_suspend_state_running)
return 0;
if (sclp_init_state != sclp_init_state_initialized)
return 0;
if (sclp_activation_state != sclp_activation_state_active)
@@ -378,16 +355,10 @@ sclp_add_request(struct sclp_req *req)
/* Start if request is first in list */
if (sclp_running_state == sclp_running_state_idle &&
req->list.prev == &sclp_req_queue) {
if (!req->sccb) {
list_del(&req->list);
rc = -ENODATA;
goto out;
}
rc = __sclp_start_request(req);
if (rc)
list_del(&req->list);
}
out:
spin_unlock_irqrestore(&sclp_lock, flags);
return rc;
}
@@ -693,7 +664,6 @@ sclp_register(struct sclp_register *reg)
/* Trigger initial state change callback */
reg->sclp_receive_mask = 0;
reg->sclp_send_mask = 0;
reg->pm_event_posted = 0;
list_add(&reg->list, &sclp_reg_list);
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_init_mask(1);
@@ -1011,112 +981,6 @@ static struct notifier_block sclp_reboot_notifier = {
.notifier_call = sclp_reboot_event
};
/*
* Suspend/resume SCLP notifier implementation
*/
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
struct sclp_register *reg;
unsigned long flags;
if (!rollback) {
spin_lock_irqsave(&sclp_lock, flags);
list_for_each_entry(reg, &sclp_reg_list, list)
reg->pm_event_posted = 0;
spin_unlock_irqrestore(&sclp_lock, flags);
}
do {
spin_lock_irqsave(&sclp_lock, flags);
list_for_each_entry(reg, &sclp_reg_list, list) {
if (rollback && reg->pm_event_posted)
goto found;
if (!rollback && !reg->pm_event_posted)
goto found;
}
spin_unlock_irqrestore(&sclp_lock, flags);
return;
found:
spin_unlock_irqrestore(&sclp_lock, flags);
if (reg->pm_event_fn)
reg->pm_event_fn(reg, sclp_pm_event);
reg->pm_event_posted = rollback ? 0 : 1;
} while (1);
}
/*
* Susend/resume callbacks for platform device
*/
static int sclp_freeze(struct device *dev)
{
unsigned long flags;
int rc;
sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
spin_lock_irqsave(&sclp_lock, flags);
sclp_suspend_state = sclp_suspend_state_suspended;
spin_unlock_irqrestore(&sclp_lock, flags);
/* Init supend data */
memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
sclp_suspend_req.callback = sclp_suspend_req_cb;
sclp_suspend_req.status = SCLP_REQ_FILLED;
init_completion(&sclp_request_queue_flushed);
rc = sclp_add_request(&sclp_suspend_req);
if (rc == 0)
wait_for_completion(&sclp_request_queue_flushed);
else if (rc != -ENODATA)
goto fail_thaw;
rc = sclp_deactivate();
if (rc)
goto fail_thaw;
return 0;
fail_thaw:
spin_lock_irqsave(&sclp_lock, flags);
sclp_suspend_state = sclp_suspend_state_running;
spin_unlock_irqrestore(&sclp_lock, flags);
sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
return rc;
}
static int sclp_undo_suspend(enum sclp_pm_event event)
{
unsigned long flags;
int rc;
rc = sclp_reactivate();
if (rc)
return rc;
spin_lock_irqsave(&sclp_lock, flags);
sclp_suspend_state = sclp_suspend_state_running;
spin_unlock_irqrestore(&sclp_lock, flags);
sclp_pm_event(event, 0);
return 0;
}
static int sclp_thaw(struct device *dev)
{
return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}
static int sclp_restore(struct device *dev)
{
return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}
static const struct dev_pm_ops sclp_pm_ops = {
.freeze = sclp_freeze,
.thaw = sclp_thaw,
.restore = sclp_restore,
};
static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
return sprintf(buf, "%i\n", sclp_console_pages);
@@ -1155,13 +1019,10 @@ static const struct attribute_group *sclp_drv_attr_groups[] = {
static struct platform_driver sclp_pdrv = {
.driver = {
.name = "sclp",
.pm = &sclp_pm_ops,
.groups = sclp_drv_attr_groups,
},
};
static struct platform_device *sclp_pdev;
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
* otherwise. */
static int
@@ -1215,23 +1076,6 @@ fail_unlock:
return rc;
}
/*
* SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
* to print the panic message.
*/
static int sclp_panic_notify(struct notifier_block *self,
unsigned long event, void *data)
{
if (sclp_suspend_state == sclp_suspend_state_suspended)
sclp_undo_suspend(SCLP_PM_EVENT_THAW);
return NOTIFY_OK;
}
static struct notifier_block sclp_on_panic_nb = {
.notifier_call = sclp_panic_notify,
.priority = SCLP_PANIC_PRIO,
};
static __init int sclp_initcall(void)
{
int rc;
@@ -1240,23 +1084,7 @@ static __init int sclp_initcall(void)
if (rc)
return rc;
sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
rc = PTR_ERR_OR_ZERO(sclp_pdev);
if (rc)
goto fail_platform_driver_unregister;
rc = atomic_notifier_chain_register(&panic_notifier_list,
&sclp_on_panic_nb);
if (rc)
goto fail_platform_device_unregister;
return sclp_init();
fail_platform_device_unregister:
platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
platform_driver_unregister(&sclp_pdrv);
return rc;
}
arch_initcall(sclp_initcall);


@@ -81,15 +81,6 @@ typedef unsigned int sclp_cmdw_t;
#define GDS_KEY_SELFDEFTEXTMSG 0x31
enum sclp_pm_event {
SCLP_PM_EVENT_FREEZE,
SCLP_PM_EVENT_THAW,
SCLP_PM_EVENT_RESTORE,
};
#define SCLP_PANIC_PRIO 1
#define SCLP_PANIC_PRIO_CLIENT 0
typedef u64 sccb_mask_t;
struct sccb_header {
@@ -293,10 +284,6 @@ struct sclp_register {
void (*state_change_fn)(struct sclp_register *);
/* called for events in cp_receive_mask/sclp_receive_mask */
void (*receiver_fn)(struct evbuf_header *);
/* called for power management events */
void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
/* pm event posted flag */
int pm_event_posted;
};
/* externals from sclp.c */


@@ -20,7 +20,6 @@
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
@@ -168,7 +167,6 @@ static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
static int sclp_mem_state_changed;
struct memory_increment {
struct list_head list;
@@ -359,8 +357,6 @@ static int sclp_mem_notifier(struct notifier_block *nb,
rc = -EINVAL;
break;
}
if (!rc)
sclp_mem_state_changed = 1;
mutex_unlock(&sclp_mem_mutex);
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
@@ -456,28 +452,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
list_add(&new_incr->list, prev);
}
static int sclp_mem_freeze(struct device *dev)
{
if (!sclp_mem_state_changed)
return 0;
pr_err("Memory hotplug state changed, suspend refused.\n");
return -EPERM;
}
static const struct dev_pm_ops sclp_mem_pm_ops = {
.freeze = sclp_mem_freeze,
};
static struct platform_driver sclp_mem_pdrv = {
.driver = {
.name = "sclp_mem",
.pm = &sclp_mem_pm_ops,
},
};
static int __init sclp_detect_standby_memory(void)
{
struct platform_device *sclp_pdev;
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
@@ -530,17 +506,7 @@ static int __init sclp_detect_standby_memory(void)
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
goto out;
rc = platform_driver_register(&sclp_mem_pdrv);
if (rc)
goto out;
sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
rc = PTR_ERR_OR_ZERO(sclp_pdev);
if (rc)
goto out_driver;
sclp_add_standby_memory();
goto out;
out_driver:
platform_driver_unregister(&sclp_mem_pdrv);
out:
free_page((unsigned long) sccb);
return rc;


@@ -36,8 +36,6 @@ static LIST_HEAD(sclp_con_outqueue);
static struct sclp_buffer *sclp_conbuf;
/* Timer for delayed output of console messages */
static struct timer_list sclp_con_timer;
/* Suspend mode flag */
static int sclp_con_suspended;
/* Flag that output queue is currently running */
static int sclp_con_queue_running;
@@ -64,7 +62,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
if (!list_empty(&sclp_con_outqueue))
buffer = list_first_entry(&sclp_con_outqueue,
struct sclp_buffer, list);
if (!buffer || sclp_con_suspended) {
if (!buffer) {
sclp_con_queue_running = 0;
spin_unlock_irqrestore(&sclp_con_lock, flags);
break;
@@ -86,7 +84,7 @@ static void sclp_conbuf_emit(void)
if (sclp_conbuf)
list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
sclp_conbuf = NULL;
if (sclp_con_queue_running || sclp_con_suspended)
if (sclp_con_queue_running)
goto out_unlock;
if (list_empty(&sclp_con_outqueue))
goto out_unlock;
@@ -180,8 +178,6 @@ sclp_console_write(struct console *console, const char *message,
if (list_empty(&sclp_con_pages))
sclp_console_full++;
while (list_empty(&sclp_con_pages)) {
if (sclp_con_suspended)
goto out;
if (sclp_console_drop_buffer())
break;
spin_unlock_irqrestore(&sclp_con_lock, flags);
@@ -214,7 +210,6 @@ sclp_console_write(struct console *console, const char *message,
!timer_pending(&sclp_con_timer)) {
mod_timer(&sclp_con_timer, jiffies + HZ / 10);
}
out:
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
@@ -235,32 +230,6 @@ sclp_console_flush(void)
sclp_console_sync_queue();
}
/*
* Resume console: If there are cached messages, emit them.
*/
static void sclp_console_resume(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
sclp_con_suspended = 0;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_conbuf_emit();
}
/*
* Suspend console: Set suspend flag and flush console
*/
static void sclp_console_suspend(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
sclp_con_suspended = 1;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_console_flush();
}
static int sclp_console_notify(struct notifier_block *self,
unsigned long event, void *data)
{
@@ -270,7 +239,7 @@ static int sclp_console_notify(struct notifier_block *self,
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_console_notify,
.priority = SCLP_PANIC_PRIO_CLIENT,
.priority = 1,
};
static struct notifier_block on_reboot_nb = {
@@ -291,22 +260,6 @@ static struct console sclp_console =
.index = 0 /* ttyS0 */
};
/*
* This function is called for SCLP suspend and resume events.
*/
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_FREEZE:
sclp_console_suspend();
break;
case SCLP_PM_EVENT_RESTORE:
case SCLP_PM_EVENT_THAW:
sclp_console_resume();
break;
}
}
/*
* called by console_init() in drivers/char/tty_io.c at boot-time.
*/


@@ -231,7 +231,6 @@ static struct sclp_register sclp_ftp_event = {
.receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
.receiver_fn = sclp_ftp_rxcb, /* async callback (rx) */
.state_change_fn = NULL,
.pm_event_fn = NULL,
};
/**


@@ -18,10 +18,6 @@
#include "sclp.h"
static void (*old_machine_restart)(char *);
static void (*old_machine_halt)(void);
static void (*old_machine_power_off)(void);
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void do_machine_quiesce(void)
{
@@ -37,42 +33,15 @@ static void do_machine_quiesce(void)
/* Handler for quiesce event. Start shutdown procedure. */
static void sclp_quiesce_handler(struct evbuf_header *evbuf)
{
if (_machine_restart != (void *) do_machine_quiesce) {
old_machine_restart = _machine_restart;
old_machine_halt = _machine_halt;
old_machine_power_off = _machine_power_off;
_machine_restart = (void *) do_machine_quiesce;
_machine_halt = do_machine_quiesce;
_machine_power_off = do_machine_quiesce;
}
_machine_restart = (void *) do_machine_quiesce;
_machine_halt = do_machine_quiesce;
_machine_power_off = do_machine_quiesce;
ctrl_alt_del();
}
/* Undo machine restart/halt/power_off modification on resume */
static void sclp_quiesce_pm_event(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_RESTORE:
if (old_machine_restart) {
_machine_restart = old_machine_restart;
_machine_halt = old_machine_halt;
_machine_power_off = old_machine_power_off;
old_machine_restart = NULL;
old_machine_halt = NULL;
old_machine_power_off = NULL;
}
break;
case SCLP_PM_EVENT_FREEZE:
case SCLP_PM_EVENT_THAW:
break;
}
}
static struct sclp_register sclp_quiesce_event = {
.receive_mask = EVTYP_SIGQUIESCE_MASK,
.receiver_fn = sclp_quiesce_handler,
.pm_event_fn = sclp_quiesce_pm_event
};
/* Initialize quiesce driver. */


@@ -26,16 +26,9 @@
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
static void sclp_rw_pm_event(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
sclp_console_pm_event(sclp_pm_event);
}
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
.send_mask = EVTYP_MSG_MASK,
.pm_event_fn = sclp_rw_pm_event,
};
/*


@@ -88,10 +88,4 @@ int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
int sclp_chars_in_buffer(struct sclp_buffer *);
#ifdef CONFIG_SCLP_CONSOLE
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
#else
static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
#endif
#endif /* __SCLP_RW_H__ */


@@ -284,7 +284,6 @@ static int
sclp_tty_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
struct sclp_buffer *t;
int count;
@@ -292,8 +291,7 @@ sclp_tty_chars_in_buffer(struct tty_struct *tty)
count = 0;
if (sclp_ttybuf != NULL)
count = sclp_chars_in_buffer(sclp_ttybuf);
list_for_each(l, &sclp_tty_outqueue) {
t = list_entry(l, struct sclp_buffer, list);
list_for_each_entry(t, &sclp_tty_outqueue, list) {
count += sclp_chars_in_buffer(t);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);


@@ -36,8 +36,8 @@
#define SCLP_VT220_MINOR 65
#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
#define SCLP_VT220_DEVICE_NAME "ttysclp"
#define SCLP_VT220_CONSOLE_NAME "ttyS"
#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
#define SCLP_VT220_CONSOLE_NAME "ttysclp"
#define SCLP_VT220_CONSOLE_INDEX 0 /* console=ttysclp0 */
/* Representation of a single write request */
struct sclp_vt220_request {
@@ -70,9 +70,6 @@ static LIST_HEAD(sclp_vt220_empty);
/* List of pending requests */
static LIST_HEAD(sclp_vt220_outqueue);
/* Suspend mode flag */
static int sclp_vt220_suspended;
/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;
@@ -96,15 +93,12 @@ static int __initdata sclp_vt220_init_count;
static int sclp_vt220_flush_later;
static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);
/* Registration structure for SCLP output event buffers */
static struct sclp_register sclp_vt220_register = {
.send_mask = EVTYP_VT220MSG_MASK,
.pm_event_fn = sclp_vt220_pm_event_fn,
};
/* Registration structure for SCLP input event buffers */
@@ -136,7 +130,7 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
if (!list_empty(&sclp_vt220_outqueue))
request = list_entry(sclp_vt220_outqueue.next,
struct sclp_vt220_request, list);
if (!request || sclp_vt220_suspended) {
if (!request) {
sclp_vt220_queue_running = 0;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
break;
@@ -242,7 +236,7 @@ sclp_vt220_emit_current(void)
}
sclp_vt220_flush_later = 0;
}
if (sclp_vt220_queue_running || sclp_vt220_suspended)
if (sclp_vt220_queue_running)
goto out_unlock;
if (list_empty(&sclp_vt220_outqueue))
goto out_unlock;
@@ -421,7 +415,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
if (list_empty(&sclp_vt220_empty))
sclp_console_full++;
while (list_empty(&sclp_vt220_empty)) {
if (may_fail || sclp_vt220_suspended)
if (may_fail)
goto out;
if (sclp_vt220_drop_buffer())
break;
@@ -792,46 +786,6 @@ static void __sclp_vt220_flush_buffer(void)
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
/*
* Resume console: If there are cached messages, emit them.
*/
static void sclp_vt220_resume(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_vt220_lock, flags);
sclp_vt220_suspended = 0;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_vt220_emit_current();
}
/*
* Suspend console: Set suspend flag and flush console
*/
static void sclp_vt220_suspend(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_vt220_lock, flags);
sclp_vt220_suspended = 1;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
__sclp_vt220_flush_buffer();
}
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_FREEZE:
sclp_vt220_suspend();
break;
case SCLP_PM_EVENT_RESTORE:
case SCLP_PM_EVENT_THAW:
sclp_vt220_resume();
break;
}
}
#ifdef CONFIG_SCLP_VT220_CONSOLE
static void


@@ -679,34 +679,10 @@ static const struct attribute_group *vmlogrdr_attr_groups[] = {
NULL,
};
static int vmlogrdr_pm_prepare(struct device *dev)
{
int rc;
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
rc = 0;
if (priv) {
spin_lock_bh(&priv->priv_lock);
if (priv->dev_in_use)
rc = -EBUSY;
spin_unlock_bh(&priv->priv_lock);
}
if (rc)
pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
dev_name(dev));
return rc;
}
static const struct dev_pm_ops vmlogrdr_pm_ops = {
.prepare = vmlogrdr_pm_prepare,
};
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
.pm = &vmlogrdr_pm_ops,
.groups = vmlogrdr_drv_attr_groups,
};


@@ -93,7 +93,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
struct hlist_head *head;
set_cpu_flag(CIF_NOHZ_DELAY);
tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
tpi_info = &get_irq_regs()->tpi_info;
trace_s390_cio_adapter_int(tpi_info);
head = &airq_lists[tpi_info->isc];
rcu_read_lock();


@@ -45,27 +45,6 @@ static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
}
}
/*
* Remove references from ccw devices to ccw group device and from
* ccw group device to ccw devices.
*/
static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
{
struct ccw_device *cdev;
int i;
for (i = 0; i < gdev->count; i++) {
cdev = gdev->cdev[i];
if (!cdev)
continue;
spin_lock_irq(cdev->ccwlock);
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irq(cdev->ccwlock);
gdev->cdev[i] = NULL;
put_device(&cdev->dev);
}
}
/**
* ccwgroup_set_online() - enable a ccwgroup device
* @gdev: target ccwgroup device
@@ -175,7 +154,6 @@ static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
__ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
}
@@ -228,7 +206,23 @@ static void ccwgroup_ungroup_workfn(struct work_struct *work)
static void ccwgroup_release(struct device *dev)
{
kfree(to_ccwgroupdev(dev));
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
unsigned int i;
for (i = 0; i < gdev->count; i++) {
struct ccw_device *cdev = gdev->cdev[i];
unsigned long flags;
if (cdev) {
spin_lock_irqsave(cdev->ccwlock, flags);
if (dev_get_drvdata(&cdev->dev) == gdev)
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(cdev->ccwlock, flags);
put_device(&cdev->dev);
}
}
kfree(gdev);
}
static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
@@ -396,15 +390,6 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
mutex_unlock(&gdev->reg_mutex);
return 0;
error:
for (i = 0; i < num_devices; i++)
if (gdev->cdev[i]) {
spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
spin_unlock_irq(gdev->cdev[i]->ccwlock);
put_device(&gdev->cdev[i]->dev);
gdev->cdev[i] = NULL;
}
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
return rc;
@@ -416,7 +401,7 @@ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
{
struct ccwgroup_device *gdev = to_ccwgroupdev(data);
if (action == BUS_NOTIFY_UNBIND_DRIVER) {
if (action == BUS_NOTIFY_UNBOUND_DRIVER) {
get_device(&gdev->dev);
schedule_work(&gdev->ungroup_work);
}
@@ -514,15 +499,6 @@ EXPORT_SYMBOL(ccwgroup_driver_register);
*/
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
struct device *dev;
/* We don't want ccwgroup devices to live longer than their driver. */
while ((dev = driver_find_next_device(&cdriver->driver, NULL))) {
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
ccwgroup_ungroup(gdev);
put_device(dev);
}
driver_unregister(&cdriver->driver);
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);


@@ -255,6 +255,9 @@ static ssize_t chp_status_write(struct device *dev,
if (!num_args)
return count;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 1);


@@ -801,8 +801,6 @@ int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* Redo PathVerification on the devices the chpid connects to
*/


@@ -536,7 +536,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
struct irb *irb;
set_cpu_flag(CIF_NOHZ_DELAY);
tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
tpi_info = &get_irq_regs()->tpi_info;
trace_s390_cio_interrupt(tpi_info);
irb = this_cpu_ptr(&cio_irb);
sch = (struct subchannel *)(unsigned long) tpi_info->intparm;


@@ -9,6 +9,7 @@
#include <asm/cio.h>
#include <asm/fcx.h>
#include <asm/schid.h>
#include <asm/tpi.h>
#include "chsc.h"
/*
@@ -46,18 +47,6 @@ struct pmcw {
/* ... in an operand exception. */
} __attribute__ ((packed));
/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
struct tpi_info {
struct subchannel_id schid;
u32 intparm;
u32 adapter_IO:1;
u32 directed_irq:1;
u32 isc:3;
u32 :27;
u32 type:3;
u32 :12;
} __packed __aligned(4);
/* Target SCHIB configuration. */
struct schib_config {
u64 mba;


@@ -163,13 +163,14 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count)
*/
static inline void cmf_activate(void *area, unsigned int onoff)
{
register void * __gpr2 asm("2");
register long __gpr1 asm("1");
__gpr2 = area;
__gpr1 = onoff;
/* activate channel measurement */
asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
asm volatile(
" lgr 1,%[r1]\n"
" lgr 2,%[mbo]\n"
" schm\n"
:
: [r1] "d" ((unsigned long)onoff), [mbo] "d" (area)
: "1", "2");
}
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,


@@ -16,18 +16,19 @@
static inline int __stsch(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
" stsch 0(%3)\n"
"0: ipm %0\n"
" srl %0,28\n"
" lgr 1,%[r1]\n"
" stsch %[addr]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (ccode), "=m" (*addr)
: "d" (reg1), "a" (addr)
: "cc");
: [cc] "+&d" (ccode), [addr] "=Q" (*addr)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
@@ -44,18 +45,19 @@ EXPORT_SYMBOL(stsch);
static inline int __msch(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
" msch 0(%2)\n"
"0: ipm %0\n"
" srl %0,28\n"
" lgr 1,%[r1]\n"
" msch %[addr]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (ccode)
: "d" (reg1), "a" (addr), "m" (*addr)
: "cc");
: [cc] "+&d" (ccode)
: [r1] "d" (r1), [addr] "Q" (*addr)
: "cc", "1");
return ccode;
}
@@ -71,16 +73,17 @@ int msch(struct subchannel_id schid, struct schib *addr)
static inline int __tsch(struct subchannel_id schid, struct irb *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" tsch 0(%3)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode), "=m" (*addr)
: "d" (reg1), "a" (addr)
: "cc");
" lgr 1,%[r1]\n"
" tsch %[addr]\n"
" ipm %[cc]\n"
" srl %[cc],28"
: [cc] "=&d" (ccode), [addr] "=Q" (*addr)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
@@ -96,18 +99,19 @@ int tsch(struct subchannel_id schid, struct irb *addr)
static inline int __ssch(struct subchannel_id schid, union orb *addr)
{
register struct subchannel_id reg1 asm("1") = schid;
unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
" ssch 0(%2)\n"
"0: ipm %0\n"
" srl %0,28\n"
" lgr 1,%[r1]\n"
" ssch %[addr]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (ccode)
: "d" (reg1), "a" (addr), "m" (*addr)
: "cc", "memory");
: [cc] "+&d" (ccode)
: [r1] "d" (r1), [addr] "Q" (*addr)
: "cc", "memory", "1");
return ccode;
}
@@ -124,16 +128,17 @@ EXPORT_SYMBOL(ssch);
static inline int __csch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" csch\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (reg1)
: "cc");
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
@@ -153,11 +158,11 @@ int tpi(struct tpi_info *addr)
int ccode;
asm volatile(
" tpi 0(%2)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode), "=m" (*addr)
: "a" (addr)
" tpi %[addr]\n"
" ipm %[cc]\n"
" srl %[cc],28"
: [cc] "=&d" (ccode), [addr] "=Q" (*addr)
:
: "cc");
trace_s390_cio_tpi(addr, ccode);
@@ -170,13 +175,13 @@ int chsc(void *chsc_area)
int cc = -EIO;
asm volatile(
" .insn rre,0xb25f0000,%2,0\n"
"0: ipm %0\n"
" srl %0,28\n"
" .insn rre,0xb25f0000,%[chsc_area],0\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (cc), "=m" (*(addr_type *) chsc_area)
: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
: [cc] "+&d" (cc), "+m" (*(addr_type *)chsc_area)
: [chsc_area] "d" (chsc_area)
: "cc");
trace_s390_cio_chsc(chsc_area, cc);
@@ -186,17 +191,17 @@ EXPORT_SYMBOL(chsc);
static inline int __rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" rsch\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (reg1)
: "cc", "memory");
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "memory", "1");
return ccode;
}
@@ -212,16 +217,17 @@ int rsch(struct subchannel_id schid)
static inline int __hsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" hsch\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (reg1)
: "cc");
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
@@ -238,16 +244,17 @@ EXPORT_SYMBOL(hsch);
static inline int __xsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" xsch\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (reg1)
: "cc");
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
@@ -266,11 +273,11 @@ static inline int __stcrw(struct crw *crw)
int ccode;
asm volatile(
" stcrw 0(%2)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode), "=m" (*crw)
: "a" (crw)
" stcrw %[crw]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode), [crw] "=Q" (*crw)
:
: "cc");
return ccode;
}


@@ -88,15 +88,15 @@ enum qdio_irq_states {
static inline int do_sqbs(u64 token, unsigned char state, int queue,
int *start, int *count)
{
register unsigned long _ccq asm ("0") = *count;
register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _ccq = *count;
asm volatile(
" .insn rsy,0xeb000000008A,%1,0,0(%2)"
: "+d" (_ccq), "+d" (_queuestart)
: "d" ((unsigned long)state), "d" (_token)
: "memory", "cc");
" lgr 1,%[token]\n"
" .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
: [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
: [state] "d" ((unsigned long)state), [token] "d" (token)
: "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
@@ -106,16 +106,17 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
int *start, int *count, int ack)
{
register unsigned long _ccq asm ("0") = *count;
register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = (unsigned long)ack << 63;
unsigned long _ccq = *count;
asm volatile(
" .insn rrf,0xB99c0000,%1,%2,0,0"
: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
: "d" (_token)
: "memory", "cc");
" lgr 1,%[token]\n"
" .insn rrf,0xb99c0000,%[qs],%[state],%[ccq],0"
: [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart),
[state] "+&d" (_state)
: [token] "d" (token)
: "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
*state = _state & 0xff;


@@ -31,38 +31,41 @@ MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
unsigned int out_mask, unsigned int in_mask,
unsigned long out_mask, unsigned long in_mask,
unsigned int fc)
{
register unsigned long __fc asm ("0") = fc;
register unsigned long __schid asm ("1") = schid;
register unsigned long out asm ("2") = out_mask;
register unsigned long in asm ("3") = in_mask;
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[out]\n"
" lgr 3,%[in]\n"
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc)
: [fc] "d" (fc), [schid] "d" (schid),
[out] "d" (out_mask), [in] "d" (in_mask)
: "cc", "0", "1", "2", "3");
return cc;
}
static inline int do_siga_input(unsigned long schid, unsigned int mask,
unsigned int fc)
static inline int do_siga_input(unsigned long schid, unsigned long mask,
unsigned long fc)
{
register unsigned long __fc asm ("0") = fc;
register unsigned long __schid asm ("1") = schid;
register unsigned long __mask asm ("2") = mask;
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[mask]\n"
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc)
: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
: "cc", "0", "1", "2");
return cc;
}
@@ -78,23 +81,24 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
unsigned int *bb, unsigned int fc,
unsigned int *bb, unsigned long fc,
unsigned long aob)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
register unsigned long __aob asm("3") = aob;
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[mask]\n"
" lgr 3,%[aob]\n"
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc), "+d" (__fc), "+d" (__aob)
: "d" (__schid), "d" (__mask)
: "cc");
*bb = __fc >> 31;
" lgr %[fc],0\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [fc] "+&d" (fc)
: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
: "cc", "0", "1", "2", "3");
*bb = fc >> 31;
return cc;
}


@@ -168,10 +168,8 @@ TRACE_EVENT(s390_cio_tpi,
memset(&__entry->tpi_info, 0, sizeof(struct tpi_info));
else if (addr)
__entry->tpi_info = *addr;
else {
memcpy(&__entry->tpi_info, &S390_lowcore.subchannel_id,
sizeof(struct tpi_info));
}
else
__entry->tpi_info = S390_lowcore.tpi_info;
__entry->cssid = __entry->tpi_info.schid.cssid;
__entry->ssid = __entry->tpi_info.schid.ssid;
__entry->schno = __entry->tpi_info.schid.sch_no;


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2006, 2020
* Copyright IBM Corp. 2006, 2021
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -77,6 +77,9 @@ EXPORT_SYMBOL(ap_perms_mutex);
/* # of bus scans since init */
static atomic64_t ap_scan_bus_count;
/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
/* completion for initial APQN bindings complete */
static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
@@ -584,22 +587,47 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
*/
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
int rc;
int rc = 0;
struct ap_device *ap_dev = to_ap_dev(dev);
/* Uevents from ap bus core don't need extensions to the env */
if (dev == ap_root_device)
return 0;
/* Set up DEV_TYPE environment variable. */
rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
if (rc)
return rc;
if (is_card_dev(dev)) {
struct ap_card *ac = to_ap_card(&ap_dev->device);
/* Add MODALIAS= */
rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
if (rc)
return rc;
/* Set up DEV_TYPE environment variable. */
rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
if (rc)
return rc;
/* Add MODALIAS= */
rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
if (rc)
return rc;
/* Add MODE=<accel|cca|ep11> */
if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
rc = add_uevent_var(env, "MODE=accel");
else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
rc = add_uevent_var(env, "MODE=cca");
else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
rc = add_uevent_var(env, "MODE=ep11");
if (rc)
return rc;
} else {
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
/* Add MODE=<accel|cca|ep11> */
if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
rc = add_uevent_var(env, "MODE=accel");
else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
rc = add_uevent_var(env, "MODE=cca");
else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
rc = add_uevent_var(env, "MODE=ep11");
if (rc)
return rc;
}
return 0;
}
@@ -613,11 +641,36 @@ static void ap_send_init_scan_done_uevent(void)
static void ap_send_bindings_complete_uevent(void)
{
char *envp[] = { "BINDINGS=complete", NULL };
char buf[32];
char *envp[] = { "BINDINGS=complete", buf, NULL };
snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
atomic64_inc_return(&ap_bindings_complete_count));
kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
{
char buf[16];
char *envp[] = { buf, NULL };
snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);
kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_config_uevent);
void ap_send_online_uevent(struct ap_device *ap_dev, int online)
{
char buf[16];
char *envp[] = { buf, NULL };
snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 1 : 0);
kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_online_uevent);
/*
* calc # of bound APQNs
*/
@@ -885,8 +938,6 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
struct device_driver *drv = &ap_drv->driver;
drv->bus = &ap_bus_type;
drv->probe = ap_device_probe;
drv->remove = ap_device_remove;
drv->owner = owner;
drv->name = name;
return driver_register(drv);
@@ -1319,6 +1370,8 @@ static struct bus_type ap_bus_type = {
.bus_groups = ap_bus_groups,
.match = &ap_bus_match,
.uevent = &ap_uevent,
.probe = ap_device_probe,
.remove = ap_device_remove,
};
/**
@@ -1540,6 +1593,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
spin_unlock_bh(&aq->lock);
AP_DBF_INFO("%s(%d,%d) queue device config off\n",
__func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@@ -1554,6 +1608,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
spin_unlock_bh(&aq->lock);
AP_DBF_INFO("%s(%d,%d) queue device config on\n",
__func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
/* handle other error states */
@@ -1663,12 +1718,13 @@ static inline void ap_scan_adapter(int ap)
ac->config = false;
AP_DBF_INFO("%s(%d) card device config off\n",
__func__, ap);
ap_send_config_uevent(&ac->ap_dev, ac->config);
}
if (!decfg && !ac->config) {
ac->config = true;
AP_DBF_INFO("%s(%d) card device config on\n",
__func__, ap);
ap_send_config_uevent(&ac->ap_dev, ac->config);
}
}
}


@@ -362,4 +362,7 @@ int ap_parse_mask_str(const char *str,
*/
int ap_wait_init_apqn_bindings_complete(unsigned long timeout);
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg);
void ap_send_online_uevent(struct ap_device *ap_dev, int online);
#endif /* _AP_BUS_H_ */


@@ -167,6 +167,8 @@ static ssize_t config_store(struct device *dev,
ac->config = cfg ? true : false;
ap_send_config_uevent(&ac->ap_dev, ac->config);
return count;
}

Some files were not shown because too many files have changed in this diff.