Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The main bulk of the s390 patches for the 4.10 merge window:

   - Add support for the contiguous memory allocator.

   - The recovery for I/O errors in the dasd device driver is improved,
     the driver will now remove channel paths that are not working
     properly.

   - Additional fields are added to /proc/sysinfo, the extended
     partition name and the partition UUID.

   - New naming for PCI devices with system defined UIDs.

   - The last few remaining alloc_bootmem calls are converted to
     memblock.

   - The thread_info structure is stripped down and moved to the
     task_struct. The only field left in thread_info is the flags field.

   - Rework of the arch topology code to fix a fake numa issue.

   - Refactoring of the atomic primitives and add a new preempt_count
     implementation.

   - Clocksource steering for the STP sync check offsets.

   - The s390 specific headers are changed to make them usable with
     CLANG.

   - Bug fixes and cleanup"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (70 commits)
  s390/cpumf: Use configuration level indication for sampling data
  s390: provide memmove implementation
  s390: cleanup arch/s390/kernel Makefile
  s390: fix initrd corruptions with gcov/kcov instrumented kernels
  s390: exclude early C code from gcov profiling
  s390/dasd: channel path aware error recovery
  s390/dasd: extend dasd path handling
  s390: remove unused labels from entry.S
  s390/vmlogrdr: fix IUCV buffer allocation
  s390/crypto: unlock on error in prng_tdes_read()
  s390/sysinfo: show partition extended name and UUID if available
  s390/numa: pin all possible cpus to nodes early
  s390/numa: establish cpu to node mapping early
  s390/topology: use cpu_topology array instead of per cpu variable
  s390/smp: initialize cpu_present_mask in setup_arch
  s390/topology: always use s390 specific sched_domain_topology_level
  s390/smp: use smp_get_base_cpu() helper function
  s390/numa: always use logical cpu and core ids
  s390: Remove VLAIS in ptff() and clear_table()
  s390: fix machine check panic stack switch
  ...
commit 2ec4584eb8
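The thread_info rework is the change with the widest footprint in the diff below: with CONFIG_THREAD_INFO_IN_TASK the remaining thread_info (just the flags word) is embedded in task_struct, so per-task state that used to be reached through the lowcore thread_info pointer is now reached through current. A minimal sketch of the access-pattern change (illustrative, not the literal kernel code):

    /* before: thread_info lived at the bottom of the kernel stack and
     * was found through a pointer kept in the lowcore */
    struct thread_info *ti = (struct thread_info *) S390_lowcore.thread_info;
    ti->sys_call_table = (unsigned long) &sys_call_table;

    /* after: only the flags word is left in thread_info, which is
     * embedded in task_struct; everything else moved to thread_struct */
    current->thread.sys_call_table = (unsigned long) &sys_call_table;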
@@ -10557,7 +10557,7 @@ F: arch/s390/pci/
 F: drivers/pci/hotplug/s390_pci_hpc.c
 
 S390 ZCRYPT DRIVER
-M: Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
+M: Harald Freudenberger <freude@de.ibm.com>
 L: linux-s390@vger.kernel.org
 W: http://www.ibm.com/developerworks/linux/linux390/
 S: Supported
@@ -136,6 +136,7 @@ config S390
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
+	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -169,6 +170,7 @@ config S390
 	select OLD_SIGSUSPEND3
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select THREAD_INFO_IN_TASK
 	select TTY
 	select VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_SCALED_CPUTIME
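HAVE_DMA_CONTIGUOUS wires the generic contiguous memory allocator into the architecture; the defconfig hunks below turn it on. CONFIG_CMA_SIZE_MBYTES=0 means no fixed area is reserved by default, so a reservation has to be requested on the kernel command line, e.g. (illustrative value):

    cma=64M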
@@ -46,7 +46,7 @@ mover_end:
 
 	.align	8
 .Lstack:
-	.quad	0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+	.quad	0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
 .Loffset:
 	.quad	0x11000
 .Lmvsize:
@@ -66,6 +66,8 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZPOOL=m
 CONFIG_ZBUD=m
@@ -366,6 +368,8 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -438,7 +442,6 @@ CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -693,3 +696,4 @@ CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@ CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -633,3 +634,4 @@ CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@ CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -632,3 +633,4 @@ CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
@@ -507,8 +507,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
 			prng_data->prngws.byte_counter += n;
 			prng_data->prngws.reseed_counter += n;
 
-		if (copy_to_user(ubuf, prng_data->buf, chunk))
-			return -EFAULT;
+		if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+			ret = -EFAULT;
+			break;
+		}
 
 		nbytes -= chunk;
 		ret += chunk;
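The prng_tdes_read() fix matters because the function holds a lock while copying out random data; returning -EFAULT directly skipped the unlock. A sketch of the pattern the hunk restores (simplified; buf and lock stand in for the driver's state and are not the real identifiers):

    static ssize_t example_read(char __user *ubuf, size_t nbytes)
    {
    	ssize_t ret = 0;

    	mutex_lock(&lock);
    	while (nbytes) {
    		size_t chunk = min(nbytes, sizeof(buf));

    		if (copy_to_user(ubuf + ret, buf, chunk)) {
    			ret = -EFAULT;
    			break;		/* fall through to the unlock below */
    		}
    		nbytes -= chunk;
    		ret += chunk;
    	}
    	mutex_unlock(&lock);
    	return ret;
    }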
@@ -3,6 +3,7 @@
  *
  * Copyright IBM Corp. 2006, 2008
  * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ * License: GPL
  */
 
 #define KMSG_COMPONENT "hypfs"
@@ -18,7 +19,8 @@
 #include <linux/time.h>
 #include <linux/parser.h>
 #include <linux/sysfs.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/kobject.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
+#include <linux/uio.h>
@@ -443,7 +445,6 @@ static struct file_system_type hypfs_type = {
 	.mount		= hypfs_mount,
 	.kill_sb	= hypfs_kill_super
 };
-MODULE_ALIAS_FS("s390_hypfs");
 
 static const struct super_operations hypfs_s_ops = {
 	.statfs		= simple_statfs,
@@ -497,21 +498,4 @@ fail_dbfs_exit:
 	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
-
-static void __exit hypfs_exit(void)
-{
-	unregister_filesystem(&hypfs_type);
-	sysfs_remove_mount_point(hypervisor_kobj, "s390");
-	hypfs_diag0c_exit();
-	hypfs_sprp_exit();
-	hypfs_vm_exit();
-	hypfs_diag_exit();
-	hypfs_dbfs_exit();
-}
-
-module_init(hypfs_init)
-module_exit(hypfs_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
-MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
+device_initcall(hypfs_init)
@@ -1,6 +1,6 @@
+generic-y += asm-offsets.h
 generic-y += clkdev.h
 generic-y += dma-contiguous.h
 generic-y += export.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h

@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
@@ -1,13 +1,8 @@
 /*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999, 2016
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	      Denis Joseph Barrow,
- *	      Arnd Bergmann <arndb@de.ibm.com>,
- *
- * Atomic operations that C can't guarantee us.
- * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP environment.
- *
+ *	      Arnd Bergmann,
  */
 
 #ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __ATOMIC_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR	"lao"
-#define __ATOMIC_AND	"lan"
-#define __ATOMIC_ADD	"laa"
-#define __ATOMIC_XOR	"lax"
-#define __ATOMIC_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
-({								\
-	int old_val;						\
-								\
-	typecheck(atomic_t *, ptr);				\
-	asm volatile(						\
-		op_string "	%0,%2,%1\n"			\
-		__barrier					\
-		: "=d" (old_val), "+Q" ((ptr)->counter)		\
-		: "d" (op_val)					\
-		: "cc", "memory");				\
-	old_val;						\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR	"or"
-#define __ATOMIC_AND	"nr"
-#define __ATOMIC_ADD	"ar"
-#define __ATOMIC_XOR	"xr"
-#define __ATOMIC_BARRIER "\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
-({								\
-	int old_val, new_val;					\
-								\
-	typecheck(atomic_t *, ptr);				\
-	asm volatile(						\
-		"	l	%0,%2\n"			\
-		"0:	lr	%1,%0\n"			\
-		op_string "	%1,%3\n"			\
-		"	cs	%0,%1,%2\n"			\
-		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)					\
-		: "cc", "memory");				\
-	old_val;						\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+	return __atomic_add_barrier(i, &v->counter) + i;
 }
 
 static inline int atomic_fetch_add(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+	return __atomic_add_barrier(i, &v->counter);
 }
 
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"asi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+	__atomic_add(i, &v->counter);
 }
 
 #define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-#define ATOMIC_OPS(op, OP)						\
+#define ATOMIC_OPS(op)							\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
+	__atomic_##op(i, &v->counter);					\
 }									\
 static inline int atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
-	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);	\
+	return __atomic_##op##_barrier(i, &v->counter);			\
 }
 
-ATOMIC_OPS(and, AND)
-ATOMIC_OPS(or, OR)
-ATOMIC_OPS(xor, XOR)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
 
@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	asm volatile(
-		"	cs	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic_cmpxchg(&v->counter, old, new);
 }
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-#undef __ATOMIC_LOOP
-
 #define ATOMIC64_INIT(i)  { (i) }
 
-#define __ATOMIC64_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR	"laog"
-#define __ATOMIC64_AND	"lang"
-#define __ATOMIC64_ADD	"laag"
-#define __ATOMIC64_XOR	"laxg"
-#define __ATOMIC64_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
-({								\
-	long long old_val;					\
-								\
-	typecheck(atomic64_t *, ptr);				\
-	asm volatile(						\
-		op_string "	%0,%2,%1\n"			\
-		__barrier					\
-		: "=d" (old_val), "+Q" ((ptr)->counter)		\
-		: "d" (op_val)					\
-		: "cc", "memory");				\
-	old_val;						\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR	"ogr"
-#define __ATOMIC64_AND	"ngr"
-#define __ATOMIC64_ADD	"agr"
-#define __ATOMIC64_XOR	"xgr"
-#define __ATOMIC64_BARRIER "\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
-({								\
-	long long old_val, new_val;				\
-								\
-	typecheck(atomic64_t *, ptr);				\
-	asm volatile(						\
-		"	lg	%0,%2\n"			\
-		"0:	lgr	%1,%0\n"			\
-		op_string "	%1,%3\n"			\
-		"	csg	%0,%1,%2\n"			\
-		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)					\
-		: "cc", "memory");				\
-	old_val;						\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline long long atomic64_read(const atomic64_t *v)
+static inline long atomic64_read(const atomic64_t *v)
 {
-	long long c;
+	long c;
 
 	asm volatile(
 		"	lg	%0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return c;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, long i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
 		: "=Q" (v->counter) : "d" (i));
 }
 
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+	return __atomic64_add_barrier(i, &v->counter) + i;
 }
 
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+	return __atomic64_add_barrier(i, &v->counter);
 }
 
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"agsi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic64_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+	__atomic64_add(i, &v->counter);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long long atomic64_cmpxchg(atomic64_t *v,
-					 long long old, long long new)
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 {
-	asm volatile(
-		"	csg	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic64_cmpxchg(&v->counter, old, new);
 }
 
-#define ATOMIC64_OPS(op, OP)						\
+#define ATOMIC64_OPS(op)						\
 static inline void atomic64_##op(long i, atomic64_t *v)		\
 {									\
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+	__atomic64_##op(i, &v->counter);				\
 }									\
 static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
 {									\
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
+	return __atomic64_##op##_barrier(i, &v->counter);		\
 }
 
-ATOMIC64_OPS(and, AND)
-ATOMIC64_OPS(or, OR)
-ATOMIC64_OPS(xor, XOR)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
-#undef __ATOMIC64_LOOP
 
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
 {
-	long long c, old;
+	long c, old;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 	return c != u;
 }
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long c, old, dec;
+	long c, old, dec;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_inc(_v)		atomic64_add(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
 #define atomic64_dec(_v)		atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
@@ -0,0 +1,130 @@
+/*
+ * Low level function for atomic operations
+ *
+ * Copyright IBM Corp. 1999, 2016
+ */
+
+#ifndef __ARCH_S390_ATOMIC_OPS__
+#define __ARCH_S390_ATOMIC_OPS__
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)	\
+static inline op_type op_name(op_type val, op_type *ptr)	\
+{								\
+	op_type old;						\
+								\
+	asm volatile(						\
+		op_string "	%[old],%[val],%[ptr]\n"		\
+		op_barrier					\
+		: [old] "=d" (old), [ptr] "+Q" (*ptr)		\
+		: [val] "d" (val) : "cc", "memory");		\
+	return old;						\
+}								\
+
+#define __ATOMIC_OPS(op_name, op_type, op_string)		\
+	__ATOMIC_OP(op_name, op_type, op_string, "\n")		\
+	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_OPS(__atomic_add, int, "laa")
+__ATOMIC_OPS(__atomic_and, int, "lan")
+__ATOMIC_OPS(__atomic_or,  int, "lao")
+__ATOMIC_OPS(__atomic_xor, int, "lax")
+
+__ATOMIC_OPS(__atomic64_add, long, "laag")
+__ATOMIC_OPS(__atomic64_and, long, "lang")
+__ATOMIC_OPS(__atomic64_or,  long, "laog")
+__ATOMIC_OPS(__atomic64_xor, long, "laxg")
+
+#undef __ATOMIC_OPS
+#undef __ATOMIC_OP
+
+static inline void __atomic_add_const(int val, int *ptr)
+{
+	asm volatile(
+		"	asi	%[ptr],%[val]\n"
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+static inline void __atomic64_add_const(long val, long *ptr)
+{
+	asm volatile(
+		"	agsi	%[ptr],%[val]\n"
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OP(op_name, op_string)				\
+static inline int op_name(int val, int *ptr)			\
+{								\
+	int old, new;						\
+								\
+	asm volatile(						\
+		"0:	lr	%[new],%[old]\n"		\
+		op_string "	%[new],%[val]\n"		\
+		"	cs	%[old],%[new],%[ptr]\n"		\
+		"	jl	0b"				\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [val] "d" (val), "0" (*ptr) : "cc", "memory");\
+	return old;						\
+}
+
+#define __ATOMIC_OPS(op_name, op_string)			\
+	__ATOMIC_OP(op_name, op_string)				\
+	__ATOMIC_OP(op_name##_barrier, op_string)
+
+__ATOMIC_OPS(__atomic_add, "ar")
+__ATOMIC_OPS(__atomic_and, "nr")
+__ATOMIC_OPS(__atomic_or,  "or")
+__ATOMIC_OPS(__atomic_xor, "xr")
+
+#undef __ATOMIC_OPS
+
+#define __ATOMIC64_OP(op_name, op_string)			\
+static inline long op_name(long val, long *ptr)			\
+{								\
+	long old, new;						\
+								\
+	asm volatile(						\
+		"0:	lgr	%[new],%[old]\n"		\
+		op_string "	%[new],%[val]\n"		\
+		"	csg	%[old],%[new],%[ptr]\n"		\
+		"	jl	0b"				\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [val] "d" (val), "0" (*ptr) : "cc", "memory");\
+	return old;						\
+}
+
+#define __ATOMIC64_OPS(op_name, op_string)			\
+	__ATOMIC64_OP(op_name, op_string)			\
+	__ATOMIC64_OP(op_name##_barrier, op_string)
+
+__ATOMIC64_OPS(__atomic64_add, "agr")
+__ATOMIC64_OPS(__atomic64_and, "ngr")
+__ATOMIC64_OPS(__atomic64_or, "ogr")
+__ATOMIC64_OPS(__atomic64_xor, "xgr")
+
+#undef __ATOMIC64_OPS
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+{
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
+static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+{
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
+#endif /* __ARCH_S390_ATOMIC_OPS__ */
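The new asm/atomic_ops.h gives atomic.h and bitops.h one common set of building blocks: each operation comes in an unfenced flavor and a _barrier flavor (on z196 and newer the fence is a fast "bcr 14,0"). How atomic.h composes them, in outline (taken from the hunks above):

    /* relaxed read-modify-write; the old value is discarded */
    static inline void atomic_add(int i, atomic_t *v)
    {
    	__atomic_add(i, &v->counter);
    }

    /* fully ordered variant, built from the _barrier flavor */
    static inline int atomic_add_return(int i, atomic_t *v)
    {
    	return __atomic_add_barrier(i, &v->counter) + i;
    }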
@@ -42,57 +42,9 @@
 
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 
-#define __BITOPS_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __BITOPS_OR		"laog"
-#define __BITOPS_AND		"lang"
-#define __BITOPS_XOR		"laxg"
-#define __BITOPS_BARRIER	"bcr	14,0\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
-({								\
-	unsigned long __old;					\
-								\
-	typecheck(unsigned long *, (__addr));			\
-	asm volatile(						\
-		__op_string "	%0,%2,%1\n"			\
-		__barrier					\
-		: "=d" (__old), "+Q" (*(__addr))		\
-		: "d" (__val)					\
-		: "cc", "memory");				\
-	__old;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __BITOPS_OR		"ogr"
-#define __BITOPS_AND		"ngr"
-#define __BITOPS_XOR		"xgr"
-#define __BITOPS_BARRIER	"\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
-({								\
-	unsigned long __old, __new;				\
-								\
-	typecheck(unsigned long *, (__addr));			\
-	asm volatile(						\
-		"	lg	%0,%2\n"			\
-		"0:	lgr	%1,%0\n"			\
-		__op_string "	%1,%3\n"			\
-		"	csg	%0,%1,%2\n"			\
-		"	jl	0b"				\
-		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
-		: "d" (__val)					\
-		: "cc", "memory");				\
-	__old;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 static inline unsigned long *
@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
+	__atomic64_or(mask, addr);
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
+	__atomic64_and(mask, addr);
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
+	__atomic64_xor(mask, addr);
 }
 
 static inline int
@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
+	old = __atomic64_or_barrier(mask, addr);
 	return (old & mask) != 0;
 }
 
@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+	old = __atomic64_and_barrier(mask, addr);
 	return (old & ~mask) != 0;
 }
 
@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
+	old = __atomic64_xor_barrier(mask, addr);
 	return (old & mask) != 0;
 }
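With the old __BITOPS_LOOP macros gone, the bitops are thin wrappers around the 64-bit atomic primitives; only the mask arithmetic stays in bitops.h. The mapping, in outline (variable declarations omitted):

    mask = 1UL << (nr & (BITS_PER_LONG - 1));

    __atomic64_or(mask, addr);		/* set_bit()    */
    __atomic64_and(~mask, addr);	/* clear_bit()  */
    __atomic64_xor(mask, addr);		/* change_bit() */

    /* test_and_* variants use the fenced flavor and inspect the old word */
    old = __atomic64_or_barrier(mask, addr);
    return (old & mask) != 0;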
@@ -104,7 +104,8 @@ struct hws_basic_entry {
 	unsigned int P:1;		/* 28 PSW Problem state		*/
 	unsigned int AS:2;		/* 29-30 PSW address-space control */
 	unsigned int I:1;		/* 31 entry valid or invalid	*/
-	unsigned int:16;
+	unsigned int CL:2;		/* 32-33 Configuration Level	*/
+	unsigned int:14;
 	unsigned int prim_asn:16;	/* primary ASN			*/
 	unsigned long long ia;		/* Instruction Address		*/
 	unsigned long long gpp;		/* Guest Program Parameter	*/
@@ -193,7 +193,7 @@ extern char elf_platform[];
 do {								\
 	set_personality(PER_LINUX |				\
 		(current->personality & (~PER_MASK)));		\
-	current_thread_info()->sys_call_table =			\
+	current->thread.sys_call_table =			\
 		(unsigned long) &sys_call_table;		\
 } while (0)
 #else /* CONFIG_COMPAT */
@@ -204,11 +204,11 @@ do {								\
 		(current->personality & ~PER_MASK));		\
 	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) {		\
 		set_thread_flag(TIF_31BIT);			\
-		current_thread_info()->sys_call_table =		\
+		current->thread.sys_call_table =		\
 			(unsigned long) &sys_call_table_emu;	\
 	} else {						\
 		clear_thread_flag(TIF_31BIT);			\
-		current_thread_info()->sys_call_table =		\
+		current->thread.sys_call_table =		\
 			(unsigned long) &sys_call_table;	\
 	}							\
 } while (0)
@@ -1,82 +0,0 @@
-/*
- * Copyright IBM Corp. 2015
- */
-
-#ifndef S390_GEN_FACILITIES_C
-#error "This file can only be included by gen_facilities.c"
-#endif
-
-#include <linux/kconfig.h>
-
-struct facility_def {
-	char *name;
-	int *bits;
-};
-
-static struct facility_def facility_defs[] = {
-	{
-		/*
-		 * FACILITIES_ALS contains the list of facilities that are
-		 * required to run a kernel that is compiled e.g. with
-		 * -march=<machine>.
-		 */
-		.name = "FACILITIES_ALS",
-		.bits = (int[]){
-#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
-			0,  /* N3 instructions */
-			1,  /* z/Arch mode installed */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
-			18, /* long displacement facility */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-			7,  /* stfle */
-			17, /* message security assist */
-			21, /* extended-immediate facility */
-			25, /* store clock fast */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-			27, /* mvcos */
-			32, /* compare and swap and store */
-			33, /* compare and swap and store 2 */
-			34, /* general extension facility */
-			35, /* execute extensions */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-			45, /* fast-BCR, etc. */
-#endif
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
-			49, /* misc-instruction-extensions */
-			52, /* interlocked facility 2 */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
-			53, /* load-and-zero-rightmost-byte, etc. */
-#endif
-			-1 /* END */
-		}
-	},
-	{
-		.name = "FACILITIES_KVM",
-		.bits = (int[]){
-			0,  /* N3 instructions */
-			1,  /* z/Arch mode installed */
-			2,  /* z/Arch mode active */
-			3,  /* DAT-enhancement */
-			4,  /* idte segment table */
-			5,  /* idte region table */
-			6,  /* ASN-and-LX reuse */
-			7,  /* stfle */
-			8,  /* enhanced-DAT 1 */
-			9,  /* sense-running-status */
-			10, /* conditional sske */
-			13, /* ipte-range */
-			14, /* nonquiescing key-setting */
-			73, /* transactional execution */
-			75, /* access-exception-fetch/store indication */
-			76, /* msa extension 3 */
-			77, /* msa extension 4 */
-			78, /* enhanced-DAT 2 */
-			-1 /* END */
-		}
-	},
-};
@@ -97,7 +97,7 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
 extern void do_reipl(void);
 extern void do_halt(void);
 extern void do_poff(void);
-extern void ipl_save_parameters(void);
+extern void ipl_verify_parameters(void);
 extern void ipl_update_parameters(void);
 extern size_t append_ipl_vmparm(char *, size_t);
 extern size_t append_ipl_scpdata(char *, size_t);
@@ -95,7 +95,7 @@ struct lowcore {
 
 	/* Current process. */
 	__u64	current_task;			/* 0x0310 */
-	__u64	thread_info;			/* 0x0318 */
+	__u8	pad_0x318[0x320-0x318];		/* 0x0318 */
 	__u64	kernel_stack;			/* 0x0320 */
 
 	/* Interrupt, panic and restart stack. */
@@ -126,7 +126,8 @@ struct lowcore {
 	__u64	percpu_offset;			/* 0x0378 */
 	__u64	vdso_per_cpu_data;		/* 0x0380 */
 	__u64	machine_flags;			/* 0x0388 */
-	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
+	__u32	preempt_count;			/* 0x0390 */
+	__u8	pad_0x0394[0x0398-0x0394];	/* 0x0394 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
 	__u32	fpu_flags;			/* 0x03a4 */
@@ -46,6 +46,8 @@ struct clp_fh_list_entry {
 #define CLP_UTIL_STR_LEN	64
 #define CLP_PFIP_NR_SEGMENTS	4
 
+extern bool zpci_unique_uid;
+
 /* List PCI functions request */
 struct clp_req_list_pci {
 	struct clp_req_hdr hdr;
@@ -59,7 +61,8 @@ struct clp_rsp_list_pci {
 	u64 resume_token;
 	u32 reserved2;
 	u16 max_fn;
-	u8 reserved3;
+	u8			: 7;
+	u8 uid_checking		: 1;
 	u8 entry_size;
 	struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
 } __packed;
|
|||
|
||||
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
|
||||
{
|
||||
typedef struct { char _[n]; } addrtype;
|
||||
struct addrtype { char _[256]; };
|
||||
int i;
|
||||
|
||||
*s = val;
|
||||
n = (n / 256) - 1;
|
||||
asm volatile(
|
||||
" mvc 8(248,%0),0(%0)\n"
|
||||
"0: mvc 256(256,%0),0(%0)\n"
|
||||
" la %0,256(%0)\n"
|
||||
" brct %1,0b\n"
|
||||
: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
|
||||
: "m" (*(addrtype *) s));
|
||||
for (i = 0; i < n; i += 256) {
|
||||
*s = val;
|
||||
asm volatile(
|
||||
"mvc 8(248,%[s]),0(%[s])\n"
|
||||
: "+m" (*(struct addrtype *) s)
|
||||
: [s] "a" (s));
|
||||
s += 256 / sizeof(long);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
|
||||
|
|
|
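The clear_table() rewrite above (and the ptff() rewrite further down) exists to drop VLAIS, a GCC-only extension that clang rejects; this is the "usable with CLANG" work from the merge description ("s390: Remove VLAIS in ptff() and clear_table()"). The construct in question, shown in isolation:

    /* VLAIS: a variable-length array inside a struct type, used only to
     * tell the compiler how much memory the asm touches.  GCC accepts
     * it, clang does not: */
    typedef struct { char _[n]; } addrtype;	/* n is a runtime value */

    /* replacement: a fixed 256-byte window, with the loop (or macro)
     * stepping through the buffer one window at a time */
    struct addrtype { char _[256]; };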
@@ -0,0 +1,137 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/current.h>
+#include <linux/thread_info.h>
+#include <asm/atomic_ops.h>
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
+static inline int preempt_count(void)
+{
+	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static inline void preempt_count_set(int pc)
+{
+	int old, new;
+
+	do {
+		old = READ_ONCE(S390_lowcore.preempt_count);
+		new = (old & PREEMPT_NEED_RESCHED) |
+			(pc & ~PREEMPT_NEED_RESCHED);
+	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+				  old, new) != old);
+}
+
+#define init_task_preempt_count(p)	do { } while (0)
+
+#define init_idle_preempt_count(p, cpu)	do { \
+	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+static inline void __preempt_count_add(int val)
+{
+	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
+		__atomic_add_const(val, &S390_lowcore.preempt_count);
+	else
+		__atomic_add(val, &S390_lowcore.preempt_count);
+}
+
+static inline void __preempt_count_sub(int val)
+{
+	__preempt_count_add(-val);
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+			preempt_offset);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define PREEMPT_ENABLED	(0)
+
+static inline int preempt_count(void)
+{
+	return READ_ONCE(S390_lowcore.preempt_count);
+}
+
+static inline void preempt_count_set(int pc)
+{
+	S390_lowcore.preempt_count = pc;
+}
+
+#define init_task_preempt_count(p)	do { } while (0)
+
+#define init_idle_preempt_count(p, cpu)	do { \
+	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+	return false;
+}
+
+static inline void __preempt_count_add(int val)
+{
+	S390_lowcore.preempt_count += val;
+}
+
+static inline void __preempt_count_sub(int val)
+{
+	S390_lowcore.preempt_count -= val;
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+	return !--S390_lowcore.preempt_count && tif_need_resched();
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
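On z196 and newer the preempt counter lives in the lowcore and folds PREEMPT_NEED_RESCHED into it with inverted polarity, the same trick the x86 implementation uses: the flag bit is *cleared* when a reschedule is requested. That way the check in should_resched() collapses to a single comparison. A sketch of the semantics implied by the code above:

    /* PREEMPT_ENABLED == PREEMPT_NEED_RESCHED, i.e. "count zero, no
     * resched requested".  set_preempt_need_resched() *clears* the bit: */
    __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);

    /* "may we preempt now?" is then one compare of the raw word: only
     * when the count equals the offset AND the flag bit is cleared can
     * the two sides be equal */
    return unlikely(READ_ONCE(S390_lowcore.preempt_count) == preempt_offset);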
@@ -110,14 +110,20 @@ typedef struct {
 struct thread_struct {
 	unsigned int  acrs[NUM_ACRS];
 	unsigned long ksp;			/* kernel stack pointer */
+	unsigned long user_timer;		/* task cputime in user space */
+	unsigned long system_timer;		/* task cputime in kernel space */
+	unsigned long sys_call_table;		/* system call table address */
 	mm_segment_t mm_segment;
 	unsigned long gmap_addr;		/* address of last gmap fault. */
 	unsigned int gmap_write_flag;		/* gmap fault write indication */
 	unsigned int gmap_int_code;		/* int code of last gmap fault */
 	unsigned int gmap_pfault;		/* signal of a pending guest pfault */
 	/* Per-thread information related to debugging */
 	struct per_regs per_user;		/* User specified PER registers */
 	struct per_event per_event;		/* Cause of the last PER trap */
 	unsigned long per_flags;		/* Flags to control debug behavior */
+	unsigned int system_call;		/* system call number in signal */
+	unsigned long last_break;		/* last breaking-event-address. */
 	/* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
 	struct list_head list;
@@ -101,7 +101,8 @@ struct zpci_report_error_header {
 	u8 data[0];	/* Subsequent Data passed verbatim to SCLP ET 24 */
 } __packed;
 
-int sclp_get_core_info(struct sclp_core_info *info);
+int _sclp_get_core_info_early(struct sclp_core_info *info);
+int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
 int sclp_core_deconfigure(u8 core);
 int sclp_sdias_blk_count(void);
@@ -119,4 +120,11 @@ void sclp_early_detect(void);
 void _sclp_print_early(const char *);
 void sclp_ocf_cpc_name_copy(char *dst);
 
+static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
+{
+	if (early)
+		return _sclp_get_core_info_early(info);
+	return _sclp_get_core_info(info);
+}
+
 #endif /* _ASM_S390_SCLP_H */
@@ -96,7 +96,8 @@ struct tm_scsw {
 	u32 dstat:8;
 	u32 cstat:8;
 	u32 fcxs:8;
-	u32 schxs:8;
+	u32 ifob:1;
+	u32 sesq:7;
 } __attribute__ ((packed));
 
 /**
@@ -177,6 +178,9 @@ union scsw {
 #define SCHN_STAT_INTF_CTRL_CHK	 0x02
 #define SCHN_STAT_CHAIN_CHECK	 0x01
 
+#define SCSW_SESQ_DEV_NOFCX	3
+#define SCSW_SESQ_PATH_NOFCX	4
+
 /*
  * architectured values for first sense byte
  */
@@ -36,6 +36,7 @@ extern void smp_yield_cpu(int cpu);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
+extern void smp_detect_cpus(void);
 
 #else /* CONFIG_SMP */
 
@@ -56,6 +57,7 @@ static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_fill_possible_mask(void) { }
+static inline void smp_detect_cpus(void) { }
 
 #endif /* CONFIG_SMP */
 
@@ -69,6 +71,12 @@ static inline void smp_stop_cpu(void)
 	}
 }
 
+/* Return thread 0 CPU number as base CPU */
+static inline int smp_get_base_cpu(int cpu)
+{
+	return cpu - (cpu % (smp_cpu_mtid + 1));
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
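smp_get_base_cpu() rounds a logical CPU number down to the first hardware thread of its core; smp_cpu_mtid is the maximum thread id per core. A worked example:

    /* SMT-2 (smp_cpu_mtid == 1): CPUs 4 and 5 are threads of one core */
    smp_get_base_cpu(5);	/* 5 - (5 % 2) == 4 */
    smp_get_base_cpu(4);	/* 4 - (4 % 2) == 4 */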
@@ -14,6 +14,7 @@
 #define __HAVE_ARCH_MEMCHR	/* inline & arch function */
 #define __HAVE_ARCH_MEMCMP	/* arch function */
 #define __HAVE_ARCH_MEMCPY	/* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMMOVE	/* gcc builtin & arch function */
 #define __HAVE_ARCH_MEMSCAN	/* inline & arch function */
 #define __HAVE_ARCH_MEMSET	/* gcc builtin & arch function */
 #define __HAVE_ARCH_STRCAT	/* inline & arch function */
@@ -32,6 +33,7 @@
 extern int memcmp(const void *, const void *, size_t);
 extern void *memcpy(void *, const void *, size_t);
 extern void *memset(void *, int, size_t);
+extern void *memmove(void *, const void *, size_t);
 extern int strcmp(const char *,const char *);
 extern size_t strlcat(char *, const char *, size_t);
 extern size_t strlcpy(char *, const char *, size_t);
@@ -40,7 +42,6 @@ extern char *strncpy(char *, const char *, size_t);
 extern char *strrchr(const char *, int);
 extern char *strstr(const char *, const char *);
 
-#undef __HAVE_ARCH_MEMMOVE
 #undef __HAVE_ARCH_STRCHR
 #undef __HAVE_ARCH_STRNCHR
 #undef __HAVE_ARCH_STRNCMP
@@ -107,6 +107,11 @@ struct sysinfo_2_2_2 {
 	char reserved_3[5];
 	unsigned short cpus_dedicated;
 	unsigned short cpus_shared;
+	char reserved_4[3];
+	unsigned char vsne;
+	uuid_be uuid;
+	char reserved_5[160];
+	char ext_name[256];
 };
 
 #define LPAR_CHAR_DEDICATED	(1 << 7)
@@ -127,7 +132,7 @@ struct sysinfo_3_2_2 {
 		unsigned int caf;
 		char cpi[16];
 		char reserved_1[3];
-		char ext_name_encoding;
+		unsigned char evmne;
 		unsigned int reserved_2;
 		uuid_be uuid;
 	} vm[8];
@@ -12,10 +12,10 @@
 /*
  * Size of kernel stack for each process
  */
-#define THREAD_ORDER 2
+#define THREAD_SIZE_ORDER 2
 #define ASYNC_ORDER  2
 
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define ASYNC_SIZE  (PAGE_SIZE << ASYNC_ORDER)
 
 #ifndef __ASSEMBLY__
@@ -30,15 +30,7 @@
  * - if the contents of this structure are changed, the assembly constants must also be changed
  */
 struct thread_info {
-	struct task_struct	*task;		/* main task structure */
 	unsigned long		flags;		/* low level flags */
-	unsigned long		sys_call_table;	/* System call table address */
-	unsigned int		cpu;		/* current CPU */
-	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
-	unsigned int		system_call;
-	__u64			user_timer;
-	__u64			system_timer;
-	unsigned long		last_break;	/* last breaking-event-address. */
 };
 
 /*
@@ -46,26 +38,14 @@ struct thread_info {
  */
 #define INIT_THREAD_INFO(tsk)			\
 {						\
-	.task		= &tsk,			\
 	.flags		= 0,			\
-	.cpu		= 0,			\
-	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
 
-#define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
 
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-	return (struct thread_info *) S390_lowcore.thread_info;
-}
+void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
-#define THREAD_SIZE_ORDER THREAD_ORDER
-
 #endif
 
 /*
@@ -52,11 +52,9 @@ static inline void store_clock_comparator(__u64 *time)
 
 void clock_comparator_work(void);
 
-void __init ptff_init(void);
+void __init time_early_init(void);
 
-extern unsigned char ptff_function_mask[16];
-
 extern unsigned long lpar_offset;
 extern unsigned long initial_leap_seconds;
 
 /* Function codes for the ptff instruction. */
 #define PTFF_QAF	0x00	/* query available functions */
@@ -100,21 +98,28 @@ struct ptff_qui {
 	unsigned int pad_0x5c[41];
 } __packed;
 
-static inline int ptff(void *ptff_block, size_t len, unsigned int func)
-{
-	typedef struct { char _[len]; } addrtype;
-	register unsigned int reg0 asm("0") = func;
-	register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
-	int rc;
-
-	asm volatile(
-		"	.word	0x0104\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (rc), "+m" (*(addrtype *) ptff_block)
-		: "d" (reg0), "d" (reg1) : "cc");
-	return rc;
-}
+/*
+ * ptff - Perform timing facility function
+ * @ptff_block:	Pointer to ptff parameter block
+ * @len:	Length of parameter block
+ * @func:	Function code
+ * Returns:	Condition code (0 on success)
+ */
+#define ptff(ptff_block, len, func)					\
+({									\
+	struct addrtype { char _[len]; };				\
+	register unsigned int reg0 asm("0") = func;			\
+	register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
+	int rc;								\
+									\
+	asm volatile(							\
+		"	.word	0x0104\n"				\
+		"	ipm	%0\n"					\
+		"	srl	%0,28\n"				\
+		: "=d" (rc), "+m" (*(struct addrtype *) reg1)		\
+		: "d" (reg0), "d" (reg1) : "cc");			\
+	rc;								\
+})
 
 static inline unsigned long long local_tick_disable(void)
 {
@@ -22,21 +22,22 @@ struct cpu_topology_s390 {
 	cpumask_t drawer_mask;
 };
 
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
 
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu)		  (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
-		(&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu)		  (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu)		  (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu)		  (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).drawer_mask)
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu)		  (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu)	  (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu)		  (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)	  (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu)		  (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu)	  (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu)		  (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu)	  (&cpu_topology[cpu].drawer_mask)
 
 #define mc_capable() 1
 
+void topology_init_early(void);
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -46,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else /* CONFIG_SCHED_TOPOLOGY */
 
+static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
 static inline void topology_expect_change(void) { }
@@ -65,7 +67,7 @@ static inline void topology_expect_change(void) { }
 #define cpu_to_node cpu_to_node
 static inline int cpu_to_node(int cpu)
 {
-	return per_cpu(cpu_topology, cpu).node_id;
+	return cpu_topology[cpu].node_id;
 }
 
 /* Returns a pointer to the cpumask of CPUs on node 'node'. */
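Replacing the per-CPU variable with a plain NR_CPUS array is what lets "s390/numa: establish cpu to node mapping early" work: a static array is usable before the per-CPU areas are set up, while per_cpu() is not. The access change at a glance:

    /* before: valid only after the per-CPU areas exist */
    node = per_cpu(cpu_topology, cpu).node_id;

    /* after: a plain array, safe from early setup code as well */
    node = cpu_topology[cpu].node_id;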
@@ -37,14 +37,14 @@
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current->thread.mm_segment)
 
-#define set_fs(x)							\
-({									\
+#define set_fs(x)							\
+{									\
 	unsigned long __pto;						\
 	current->thread.mm_segment = (x);				\
 	__pto = current->thread.mm_segment.ar4 ?			\
 		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
 	__ctl_load(__pto, 7, 7);					\
-})
+}
 
 #define segment_eq(a,b)	((a).ar4 == (b).ar4)
@@ -33,6 +33,8 @@ struct vdso_data {
 	__u32 ectg_available;		/* ECTG instruction present	0x58 */
 	__u32 tk_mult;			/* Mult. used for xtime_nsec	0x5c */
 	__u32 tk_shift;			/* Shift used for xtime_nsec	0x60 */
+	__u32 ts_dir;			/* TOD steering direction	0x64 */
+	__u64 ts_end;			/* TOD steering end		0x68 */
 };
 
 struct vdso_per_cpu_data {
@@ -12,6 +12,7 @@ header-y += dasd.h
 header-y += debug.h
 header-y += errno.h
 header-y += fcntl.h
+header-y += hypfs.h
 header-y += ioctl.h
 header-y += ioctls.h
 header-y += ipcbuf.h
@@ -29,16 +30,16 @@ header-y += ptrace.h
 header-y += qeth.h
 header-y += resource.h
 header-y += schid.h
+header-y += sclp_ctl.h
 header-y += sembuf.h
 header-y += setup.h
 header-y += shmbuf.h
+header-y += sie.h
 header-y += sigcontext.h
 header-y += siginfo.h
 header-y += signal.h
 header-y += socket.h
 header-y += sockios.h
-header-y += sclp_ctl.h
-header-y += sie.h
 header-y += stat.h
 header-y += statfs.h
 header-y += swab.h
@@ -2,20 +2,47 @@
 # Makefile for the linux kernel.
 #
 
-KCOV_INSTRUMENT_early.o := n
-KCOV_INSTRUMENT_sclp.o := n
-KCOV_INSTRUMENT_als.o := n
-
 ifdef CONFIG_FUNCTION_TRACER
-# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace tracer code
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace early setup code
+CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
+
 endif
 
+GCOV_PROFILE_als.o := n
+GCOV_PROFILE_early.o := n
+GCOV_PROFILE_sclp.o := n
+
+KCOV_INSTRUMENT_als.o := n
+KCOV_INSTRUMENT_early.o := n
+KCOV_INSTRUMENT_sclp.o := n
+
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+UBSAN_SANITIZE_sclp.o := n
+
+#
+# Use -march=z900 for sclp.c and als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp.o += -march=z900
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
+endif
+
 #
 # Passing null pointers is ok for smp code, since we access the lowcore here.
 #
-CFLAGS_smp.o := -Wno-nonnull
+CFLAGS_smp.o := -Wno-nonnull
 
 #
 # Disable tailcall optimizations for stack / callchain walking functions
@@ -30,27 +57,7 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
 #
 CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
-CFLAGS_sysinfo.o += -w
-
-#
-# Use -march=z900 for sclp.c and als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp.o += -march=z900
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-GCOV_PROFILE_sclp.o := n
-GCOV_PROFILE_als.o := n
-UBSAN_SANITIZE_als.o := n
-UBSAN_SANITIZE_early.o := n
-UBSAN_SANITIZE_sclp.o := n
+CFLAGS_sysinfo.o += -w
 
 obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
@@ -25,12 +25,14 @@
 int main(void)
 {
 	/* task struct offsets */
-	OFFSET(__TASK_thread_info, task_struct, stack);
+	OFFSET(__TASK_stack, task_struct, stack);
+	OFFSET(__TASK_thread, task_struct, thread);
 	OFFSET(__TASK_pid, task_struct, pid);
 	BLANK();
 	/* thread struct offsets */
 	OFFSET(__THREAD_ksp, thread_struct, ksp);
+	OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table);
+	OFFSET(__THREAD_last_break, thread_struct, last_break);
 	OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
 	OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
 	OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
@@ -39,14 +41,7 @@ int main(void)
 	OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
 	BLANK();
 	/* thread info offsets */
-	OFFSET(__TI_task, thread_info, task);
-	OFFSET(__TI_flags, thread_info, flags);
-	OFFSET(__TI_sysc_table, thread_info, sys_call_table);
-	OFFSET(__TI_cpu, thread_info, cpu);
-	OFFSET(__TI_precount, thread_info, preempt_count);
-	OFFSET(__TI_user_timer, thread_info, user_timer);
-	OFFSET(__TI_system_timer, thread_info, system_timer);
-	OFFSET(__TI_last_break, thread_info, last_break);
+	OFFSET(__TI_flags, task_struct, thread_info.flags);
 	BLANK();
 	/* pt_regs offsets */
 	OFFSET(__PT_ARGS, pt_regs, args);
@@ -79,6 +74,8 @@ int main(void)
 	OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
 	OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
 	OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+	OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
+	OFFSET(__VDSO_TS_END, vdso_data, ts_end);
 	OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
 	OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
 	OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
@@ -159,7 +156,6 @@ int main(void)
 	OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
 	OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
 	OFFSET(__LC_CURRENT, lowcore, current_task);
-	OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
 	OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
 	OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
 	OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
@@ -173,6 +169,7 @@ int main(void)
 	OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
 	OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
 	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_PASTE, lowcore, paste);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
@ -446,7 +446,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
regs->gprs[6] = task_thread_info(current)->last_break;
regs->gprs[6] = current->thread.last_break;
}

return 0;
@ -523,7 +523,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
regs->gprs[5] = current->thread.last_break;
return 0;
}

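The many mechanical task_thread_info(p)->field to p->thread.field conversions in this series (here and in ptrace.c, signal.c and vtime.c below) all fall out of the THREAD_INFO_IN_TASK select: thread_info shrinks to the flags word and is embedded in task_struct, while per-thread data such as last_break moves into thread_struct. A rough sketch of the resulting layout, with the field set reduced to what matters here (see arch/s390/include/asm/thread_info.h and processor.h for the real definitions):

	/* sketch only: layout with CONFIG_THREAD_INFO_IN_TASK */
	struct thread_info {
		unsigned long flags;		/* TIF_* bits, still reachable from asm */
	};

	struct task_struct {
		struct thread_info thread_info;	/* must stay the first member */
		void *stack;			/* the separately allocated kernel stack */
		/* ... generic scheduler state ... */
		struct thread_struct thread;	/* arch state: last_break, timers, ... */
	};

	#define task_thread_info(task)	(&(task)->thread_info)

With that layout the old accessor no longer reaches last_break; the field is read as current->thread.last_break instead.
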
@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
psw.addr = (unsigned long) s390_base_pgm_handler;
S390_lowcore.program_new_psw = psw;
s390_base_pgm_handler_fn = early_pgm_check_handler;
S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

static noinline __init void setup_facility_list(void)
@ -391,7 +392,49 @@ static int __init cad_init(void)
}
early_initcall(cad_init);

static __init void rescue_initrd(void)
static __init void memmove_early(void *dst, const void *src, size_t n)
{
unsigned long addr;
long incr;
psw_t old;

if (!n)
return;
incr = 1;
if (dst > src) {
incr = -incr;
dst += n - 1;
src += n - 1;
}
old = S390_lowcore.program_new_psw;
S390_lowcore.program_new_psw.mask = __extract_psw();
asm volatile(
" larl %[addr],1f\n"
" stg %[addr],%[psw_pgm_addr]\n"
"0: mvc 0(1,%[dst]),0(%[src])\n"
" agr %[dst],%[incr]\n"
" agr %[src],%[incr]\n"
" brctg %[n],0b\n"
"1:\n"
: [addr] "=&d" (addr),
[psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr),
[dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
: [incr] "d" (incr)
: "cc", "memory");
S390_lowcore.program_new_psw = old;
}

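What the inline assembly above does, in plain C, is a byte-wise, overlap-safe copy; the larl/stg pair additionally points the program-new PSW at the label after the loop, so a program check (say, an access exception on a bad initrd address) abandons the copy instead of taking down the barely initialized early boot environment. A minimal sketch of just the copy logic:

	#include <stddef.h>

	/* C shape of memmove_early()'s loop, without the PSW fencing */
	static void memmove_bytes(void *dst, const void *src, size_t n)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;
		long incr = 1;

		if (!n)
			return;
		if (d > s) {		/* dst may overlap the tail of src, */
			incr = -1;	/* so copy backwards */
			d += n - 1;
			s += n - 1;
		}
		while (n--) {
			*d = *s;
			d += incr;
			s += incr;
		}
	}
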
static __init noinline void ipl_save_parameters(void)
{
void *src, *dst;

src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
dst = (void *) IPL_PARMBLOCK_ORIGIN;
memmove_early(dst, src, PAGE_SIZE);
S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}

static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
@ -405,7 +448,7 @@ static __init void rescue_initrd(void)
return;
if (INITRD_START >= min_initrd_addr)
return;
memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
#endif
}
@ -467,7 +510,8 @@ void __init startup_init(void)
ipl_save_parameters();
rescue_initrd();
clear_bss_section();
ptff_init();
ipl_verify_parameters();
time_early_init();
init_kernel_storage_key();
lockdep_off();
setup_lowcore_early();

@ -42,7 +42,7 @@ __PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

@ -123,8 +123,14 @@ _PIF_WORK = (_PIF_PER_TRAP)

.macro LAST_BREAK scratch
srag \scratch,%r10,23
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
jz .+10
stg %r10,__TI_last_break(%r12)
stg %r10,__TASK_thread+__THREAD_last_break(%r12)
#else
jz .+14
lghi \scratch,__TASK_thread
stg %r10,__THREAD_last_break(\scratch,%r12)
#endif
.endm

.macro REENABLE_IRQS
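The CONFIG_HAVE_MARCH_Z990_FEATURES split in LAST_BREAK (the same pattern recurs for the system-call-table load below) is about addressing reach, not about the field itself. With last_break now in thread_struct, deep inside task_struct, the combined displacement __TASK_thread + __THREAD_last_break presumably exceeds the 4095 bytes a pre-z990 storage instruction can encode, while z990's long-displacement facility widens the displacement to a 20-bit signed field. A hedged sketch of the layout that forces the workaround (the offsets are illustrative assumptions, not taken from the patch):

	struct task_struct {
		struct thread_info thread_info;	/* offset 0 */
		/* ... several KiB of generic scheduler state ... */
		struct thread_struct thread;	/* assumed offset > 4095 */
	};

So the pre-z990 path first loads __TASK_thread into a scratch register and uses base-plus-index addressing, while the z990+ path reaches the field in a single instruction.
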
@ -186,14 +192,13 @@ ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lgr %r1,%r2
aghi %r1,__TASK_thread # thread_struct of prev task
lg %r5,__TASK_thread_info(%r3) # get thread_info of next
lg %r5,__TASK_stack(%r3) # start of kernel stack of next
stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
lgr %r1,%r3
aghi %r1,__TASK_thread # thread_struct of next task
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
@ -274,7 +279,7 @@ ENTRY(system_call)
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
lghi %r14,_PIF_SYSCALL
.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
@ -288,7 +293,13 @@ ENTRY(system_call)
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
lg %r10,__TI_sysc_table(%r12) # address of system call table
# load address of system call table
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
lg %r10,__TASK_thread+__THREAD_sysc_table(%r12)
#else
lghi %r13,__TASK_thread
lg %r10,__THREAD_sysc_table(%r13,%r12)
#endif
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
@ -389,7 +400,6 @@ ENTRY(system_call)
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jno .Lsysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
@ -457,7 +467,7 @@ ENTRY(system_call)
#
ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
brasl %r14,schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
@ -478,7 +488,7 @@ ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
@ -501,7 +511,7 @@ ENTRY(pgm_check_handler)
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
@ -567,7 +577,7 @@ ENTRY(io_int_handler)
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@ -626,7 +636,7 @@ ENTRY(io_int_handler)
jo .Lio_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
icm %r0,15,__LC_PREEMPT_COUNT
jnz .Lio_restore # preemption is disabled
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
@ -741,7 +751,7 @@ ENTRY(ext_int_handler)
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@ -798,13 +808,10 @@ ENTRY(save_fpu_regs)
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bor %r14
stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
jz .Lsave_fpu_regs_fp # no -> store FP regs
.Lsave_fpu_regs_vx_low:
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
.Lsave_fpu_regs_vx_high:
VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
j .Lsave_fpu_regs_done # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
@ -851,9 +858,7 @@ load_fpu_regs:
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz .Lload_fpu_regs_fp # -> no VX, load FP regs
.Lload_fpu_regs_vx:
VLM %v0,%v15,0,%r4
.Lload_fpu_regs_vx_high:
VLM %v16,%v31,256,%r4
j .Lload_fpu_regs_done
.Lload_fpu_regs_fp:
@ -889,7 +894,7 @@ ENTRY(mcck_int_handler)
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
@ -948,7 +953,7 @@ ENTRY(mcck_int_handler)

.Lmcck_panic:
lg %r15,__LC_PANIC_STACK
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip

#
@ -1085,7 +1090,7 @@ cleanup_critical:
jhe 0f
# set up saved registers r10 and r12
stg %r10,16(%r11) # r10 last break
stg %r12,32(%r11) # r12 thread-info pointer
stg %r12,32(%r11) # r12 task struct pointer
0: # check if the user time update has been done
clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
@ -1106,7 +1111,9 @@ cleanup_critical:
lg %r9,16(%r11)
srag %r9,%r9,23
jz 0f
mvc __TI_last_break(8,%r12),16(%r11)
lgr %r9,%r12
aghi %r9,__TASK_thread
mvc __THREAD_last_break(8,%r9),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)

@ -315,7 +315,7 @@ ENTRY(startup_kdump)
jg startup_continue

.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.align 8
6: .long 0x7fffffff,0xffffffff

@ -32,11 +32,10 @@ ENTRY(startup_continue)
#
# Setup stack
#
larl %r15,init_thread_union
stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
lg %r14,__TI_task(%r15) # cache current in lowcore
larl %r14,init_task
stg %r14,__LC_CURRENT
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
larl %r15,init_thread_union
aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
#

@ -1991,10 +1991,9 @@ void __init ipl_update_parameters(void)
diag308_set_works = 1;
}

void __init ipl_save_parameters(void)
void __init ipl_verify_parameters(void)
{
struct cio_iplinfo iplinfo;
void *src, *dst;

if (cio_get_iplinfo(&iplinfo))
return;
@ -2005,10 +2004,6 @@ void __init ipl_save_parameters(void)
if (!iplinfo.is_qdio)
return;
ipl_flags |= IPL_PARMBLOCK_VALID;
src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
dst = (void *)IPL_PARMBLOCK_ORIGIN;
memmove(dst, src, PAGE_SIZE);
S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}

static LIST_HEAD(rcall);

@ -168,7 +168,7 @@ void do_softirq_own_stack(void)
old = current_stack_pointer();
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
/* Need to switch to the async. stack. */
new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old;

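The shift test is a compact "already on this stack?" check: two addresses lie within the same THREAD_SIZE-sized, aligned stack exactly when their difference has no bits at or above STACK_SHIFT. A standalone restatement, with the THREAD_SIZE_ORDER value assumed rather than taken from the headers:

	#define PAGE_SHIFT		12
	#define THREAD_SIZE_ORDER	2	/* assumed; gives 16 KiB stacks */

	/* non-zero iff sp is outside the stack whose top is at async_top */
	static int need_stack_switch(unsigned long sp, unsigned long async_top)
	{
		return ((async_top - sp) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0;
	}

If sp lies anywhere outside the async stack, the unsigned difference carries high bits and a switch is reported; the rename from THREAD_ORDER is purely mechanical.
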
@ -5,7 +5,8 @@
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/facility.h>
@ -183,4 +184,4 @@ static int __init lgr_init(void)
lgr_timer_set();
return 0;
}
module_init(lgr_init);
device_initcall(lgr_init);

@ -995,39 +995,36 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;

regs.psw.addr = sfr->basic.ia;
if (sfr->basic.T)
regs.psw.mask |= PSW_MASK_DAT;
if (sfr->basic.W)
regs.psw.mask |= PSW_MASK_WAIT;
if (sfr->basic.P)
regs.psw.mask |= PSW_MASK_PSTATE;
switch (sfr->basic.AS) {
case 0x0:
regs.psw.mask |= PSW_ASC_PRIMARY;
break;
case 0x1:
regs.psw.mask |= PSW_ASC_ACCREG;
break;
case 0x2:
regs.psw.mask |= PSW_ASC_SECONDARY;
break;
case 0x3:
regs.psw.mask |= PSW_ASC_HOME;
break;
}
psw_bits(regs.psw).ia = sfr->basic.ia;
psw_bits(regs.psw).t = sfr->basic.T;
psw_bits(regs.psw).w = sfr->basic.W;
psw_bits(regs.psw).p = sfr->basic.P;
psw_bits(regs.psw).as = sfr->basic.AS;

/*
* A non-zero guest program parameter indicates a guest
* sample.
* Note that some early samples or samples from guests without
* Use the hardware provided configuration level to decide if the
* sample belongs to a guest or host. If that is not available,
* fall back to the following heuristics:
* A non-zero guest program parameter always indicates a guest
* sample. Some early samples or samples from guests without
* lpp usage would be misaccounted to the host. We use the asn
* value as a heuristic to detect most of these guest samples.
* If the value differs from the host hpp value, we assume
* it to be a KVM guest.
* value as an addon heuristic to detect most of these guest samples.
* If the value differs from the host hpp value, we assume to be a
* KVM guest.
*/
if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
switch (sfr->basic.CL) {
case 1: /* logical partition */
sde_regs->in_guest = 0;
break;
case 2: /* virtual machine */
sde_regs->in_guest = 1;
break;
default: /* old machine, use heuristics */
if (sfr->basic.gpp ||
sfr->basic.prim_asn != (u16)sfr->basic.hpp)
sde_regs->in_guest = 1;
break;
}

overflow = 0;
if (perf_exclude_event(event, &regs, sde_regs))

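The headline change of the "Use configuration level indication for sampling data" commit: when the sampler's CL field is present it says authoritatively where the sample ran, and the gpp/asn heuristics only back-fill on machines that do not provide it. The new switch is easiest to read as a small decision function; this restatement uses a hypothetical helper that is not part of the patch:

	/* returns 1 for a guest sample, 0 for a host sample */
	static int sample_in_guest(unsigned int cl, unsigned long gpp,
				   unsigned short prim_asn, unsigned long hpp)
	{
		switch (cl) {
		case 1:		/* configuration level: logical partition */
			return 0;
		case 2:		/* configuration level: virtual machine */
			return 1;
		default:	/* older hardware: fall back to heuristics */
			return gpp || prim_asn != (unsigned short)hpp;
		}
	}
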
@ -103,7 +103,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
{
struct thread_info *ti;
struct fake_frame
{
struct stack_frame sf;
@ -121,9 +120,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
ti = task_thread_info(p);
ti->user_timer = 0;
ti->system_timer = 0;
p->thread.user_timer = 0;
p->thread.system_timer = 0;

frame->sf.back_chain = 0;
/* new return point is ret_from_fork */

@ -461,7 +461,7 @@ long arch_ptrace(struct task_struct *child, long request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(task_thread_info(child)->last_break,
put_user(child->thread.last_break,
(unsigned long __user *) data);
return 0;
case PTRACE_ENABLE_TE:
@ -811,7 +811,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(task_thread_info(child)->last_break,
put_user(child->thread.last_break,
(unsigned int __user *) data);
return 0;
}
@ -997,10 +997,10 @@ static int s390_last_break_get(struct task_struct *target,
if (count > 0) {
if (kbuf) {
unsigned long *k = kbuf;
*k = task_thread_info(target)->last_break;
*k = target->thread.last_break;
} else {
unsigned long __user *u = ubuf;
if (__put_user(task_thread_info(target)->last_break, u))
if (__put_user(target->thread.last_break, u))
return -EFAULT;
}
}
@ -1113,7 +1113,7 @@ static int s390_system_call_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
unsigned int *data = &task_thread_info(target)->system_call;
unsigned int *data = &target->thread.system_call;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
@ -1123,7 +1123,7 @@ static int s390_system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned int *data = &task_thread_info(target)->system_call;
unsigned int *data = &target->thread.system_call;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
@ -1327,7 +1327,7 @@ static int s390_compat_last_break_get(struct task_struct *target,
compat_ulong_t last_break;

if (count > 0) {
last_break = task_thread_info(target)->last_break;
last_break = target->thread.last_break;
if (kbuf) {
unsigned long *k = kbuf;
*k = last_break;

@ -35,6 +35,7 @@
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
@ -303,7 +304,7 @@ static void __init setup_lowcore(void)
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS |
@ -324,15 +325,15 @@ static void __init setup_lowcore(void)
lc->kernel_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
lc->preempt_count = S390_lowcore.preempt_count;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
@ -349,7 +350,7 @@ static void __init setup_lowcore(void)
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;

restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
restart_stack += ASYNC_SIZE;

/*
@ -412,7 +413,7 @@ static void __init setup_resources(void)
bss_resource.end = (unsigned long) &__bss_stop - 1;

for_each_memblock(memory, reg) {
res = alloc_bootmem_low(sizeof(*res));
res = memblock_virt_alloc(sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

res->name = "System RAM";
@ -426,7 +427,7 @@ static void __init setup_resources(void)
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
sub_res = alloc_bootmem_low(sizeof(*sub_res));
sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@ -445,7 +446,7 @@ static void __init setup_resources(void)
* part of the System RAM resource.
*/
if (crashk_res.end) {
memblock_add(crashk_res.start, resource_size(&crashk_res));
memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
insert_resource(&iomem_resource, &crashk_res);
}
@ -903,6 +904,7 @@ void __init setup_arch(char **cmdline_p)

setup_memory_end();
setup_memory();
dma_contiguous_reserve(memory_end);

check_initrd();
reserve_crashkernel();
@ -921,6 +923,8 @@ void __init setup_arch(char **cmdline_p)
cpu_detect_mhz_feature();
cpu_init();
numa_setup();
smp_detect_cpus();
topology_init_early();

/*
* Create kernel page tables and switch to virtual addressing.

@ -359,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
regs->gprs[6] = task_thread_info(current)->last_break;
regs->gprs[6] = current->thread.last_break;
}
return 0;
}
@ -430,7 +430,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
regs->gprs[5] = current->thread.last_break;
return 0;
}

@ -467,13 +467,13 @@ void do_signal(struct pt_regs *regs)
* the debugger may change all our registers, including the system
* call information.
*/
current_thread_info()->system_call =
current->thread.system_call =
test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;

if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
if (current_thread_info()->system_call) {
regs->int_code = current_thread_info()->system_call;
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
/* Check for system call restarting. */
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
@ -506,8 +506,8 @@ void do_signal(struct pt_regs *regs)

/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
if (current_thread_info()->system_call) {
regs->int_code = current_thread_info()->system_call;
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */

@ -19,6 +19,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
@ -259,16 +260,14 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
struct lowcore *lc = pcpu->lowcore;
struct thread_info *ti = task_thread_info(tsk);

lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
lc->user_timer = ti->user_timer;
lc->system_timer = ti->system_timer;
lc->user_timer = tsk->thread.user_timer;
lc->system_timer = tsk->thread.system_timer;
lc->steal_timer = 0;
}

@ -662,14 +661,12 @@ int smp_cpu_get_polarization(int cpu)
return pcpu_devices[cpu].polarization;
}

static struct sclp_core_info *smp_get_core_info(void)
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
static int use_sigp_detection;
struct sclp_core_info *info;
int address;

info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info && (use_sigp_detection || sclp_get_core_info(info))) {
if (use_sigp_detection || sclp_get_core_info(info, early)) {
use_sigp_detection = 1;
for (address = 0;
address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
@ -683,7 +680,6 @@ static struct sclp_core_info *smp_get_core_info(void)
}
info->combined = info->configured;
}
return info;
}

static int smp_add_present_cpu(int cpu);
@ -724,17 +720,15 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
return nr;
}

static void __init smp_detect_cpus(void)
void __init smp_detect_cpus(void)
{
unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_core_info *info;
u16 address;

/* Get CPU information */
info = smp_get_core_info();
if (!info)
panic("smp_detect_cpus failed to allocate memory\n");

info = memblock_virt_alloc(sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
address = stap();
@ -770,7 +764,7 @@ static void __init smp_detect_cpus(void)
get_online_cpus();
__smp_rescan_cpus(info, 0);
put_online_cpus();
kfree(info);
memblock_free_early((unsigned long)info, sizeof(*info));
}

/*
@ -807,7 +801,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
base = cpu - (cpu % (smp_cpu_mtid + 1));
base = smp_get_base_cpu(cpu);
for (i = 0; i <= smp_cpu_mtid; i++) {
if (base + i < nr_cpu_ids)
if (cpu_online(base + i))
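Both open-coded cpu - (cpu % (smp_cpu_mtid + 1)) computations in this file become the smp_get_base_cpu() helper. Judging by the expression it replaces, the helper presumably reduces a CPU number to the first hardware thread of its SMT core, roughly:

	/* sketch of the helper (likely in arch/s390/include/asm/smp.h):
	 * map any hardware thread to the base thread of its core */
	static inline int smp_get_base_cpu(int cpu)
	{
		return cpu - (cpu % (smp_cpu_mtid + 1));
	}

The NUMA emulation code below uses the same helper to derive stable core ids.
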
@ -907,7 +901,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
@ -973,7 +966,7 @@ static ssize_t cpu_configure_store(struct device *dev,
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
cpu -= cpu % (smp_cpu_mtid + 1);
cpu = smp_get_base_cpu(cpu);
if (cpu == 0)
goto out;
for (i = 0; i <= smp_cpu_mtid; i++)
@ -1106,9 +1099,10 @@ int __ref smp_rescan_cpus(void)
struct sclp_core_info *info;
int nr;

info = smp_get_core_info();
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
nr = __smp_rescan_cpus(info, 1);

@ -194,7 +194,7 @@ pgm_check_entry:

/* Suspend CPU not available -> panic */
larl %r15,init_thread_union
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
larl %r2,.Lpanic_string
larl %r3,_sclp_print_early
lghi %r1,0

@ -56,6 +56,20 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);

static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
{
switch (encoding) {
case 1: /* EBCDIC */
EBCASC(name, len);
break;
case 2: /* UTF-8 */
break;
default:
return false;
}
return true;
}

static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
{
int i;
@ -207,24 +221,19 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs S-MTID: %d\n", info->mt_stid);
seq_printf(m, "LPAR CPUs PS-MTID: %d\n", info->mt_psmtid);
}
if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
seq_printf(m, "LPAR Extended Name: %-.256s\n", info->ext_name);
seq_printf(m, "LPAR UUID: %pUb\n", &info->uuid);
}
}

static void print_ext_name(struct seq_file *m, int lvl,
struct sysinfo_3_2_2 *info)
{
if (info->vm[lvl].ext_name_encoding == 0)
size_t len = sizeof(info->ext_names[lvl]);

if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
return;
if (info->ext_names[lvl][0] == 0)
return;
switch (info->vm[lvl].ext_name_encoding) {
case 1: /* EBCDIC */
EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
break;
case 2: /* UTF-8 */
break;
default:
return;
}
seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
info->ext_names[lvl]);
}

@ -59,19 +59,27 @@ ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];
unsigned long lpar_offset;
unsigned long initial_leap_seconds;

static unsigned long long lpar_offset;
static unsigned long long initial_leap_seconds;
static unsigned long long tod_steering_end;
static long long tod_steering_delta;

/*
* Get time offsets with PTFF
*/
void __init ptff_init(void)
void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;

/* Initialize TOD steering parameters */
tod_steering_end = sched_clock_base_cc;
vdso_data->ts_end = tod_steering_end;

if (!test_facility(28))
return;

ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);

/* get LPAR offset */
@ -80,7 +88,7 @@ void __init ptff_init(void)

/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
initial_leap_seconds = (unsigned long)
initial_leap_seconds = (unsigned long long)
((long) qui.old_leap * 4096000000L);
}

@ -123,18 +131,6 @@ void clock_comparator_work(void)
cd->event_handler(cd);
}

/*
* Fixup the clock comparator.
*/
static void fixup_clock_comparator(unsigned long long delta)
{
/* If nobody is waiting there's nothing to fix. */
if (S390_lowcore.clock_comparator == -1ULL)
return;
S390_lowcore.clock_comparator += delta;
set_clock_comparator(S390_lowcore.clock_comparator);
}

static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@ -215,7 +211,21 @@ void read_boot_clock64(struct timespec64 *ts)

static cycle_t read_tod_clock(struct clocksource *cs)
{
return get_tod_clock();
unsigned long long now, adj;

preempt_disable(); /* protect from changes to steering parameters */
now = get_tod_clock();
adj = tod_steering_end - now;
if (unlikely((s64) adj >= 0))
/*
* manually steer by 1 cycle every 2^16 cycles. This
* corresponds to shifting the tod delta by 15. 1s is
* therefore steered in ~9h. The adjust will decrease
* over time, until it finally reaches 0.
*/
now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
preempt_enable();
return now;
}

static struct clocksource clocksource_tod = {
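A quick sanity check of the "1s is therefore steered in ~9h" figure: TOD bit 51 ticks once per microsecond, so one second is 4096 x 10^6 TOD units, and with tod_steering_end = now + (delta << 15) one unit of offset is amortized per 2^15 elapsed units. A self-contained calculation:

	#include <stdio.h>

	int main(void)
	{
		/* TOD clock: bit 51 increments every microsecond */
		unsigned long long tod_per_sec = 4096ULL * 1000000;
		unsigned long long delta = tod_per_sec;	 /* steer out 1 second */
		unsigned long long window = delta << 15; /* steering window, TOD units */

		printf("%.1f hours\n", (double)window / tod_per_sec / 3600);
		return 0;
	}

This prints 9.1, matching the comment. The (abs(tod_steering_delta) >> 48) panic in clock_sync_global below bounds the delta so that delta << 15 stays well within 64 bits.
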
@ -384,6 +394,55 @@ static inline int check_sync_clock(void)
return rc;
}

/*
* Apply clock delta to the global data structures.
* This is called once on the CPU that performed the clock sync.
*/
static void clock_sync_global(unsigned long long delta)
{
unsigned long now, adj;
struct ptff_qto qto;

/* Fixup the monotonic sched clock. */
sched_clock_base_cc += delta;
/* Adjust TOD steering parameters. */
vdso_data->tb_update_count++;
now = get_tod_clock();
adj = tod_steering_end - now;
if (unlikely((s64) adj >= 0))
/* Calculate how much of the old adjustment is left. */
tod_steering_delta = (tod_steering_delta < 0) ?
-(adj >> 15) : (adj >> 15);
tod_steering_delta += delta;
if ((abs(tod_steering_delta) >> 48) != 0)
panic("TOD clock sync offset %lli is too large to drift\n",
tod_steering_delta);
tod_steering_end = now + (abs(tod_steering_delta) << 15);
vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
vdso_data->ts_end = tod_steering_end;
vdso_data->tb_update_count++;
/* Update LPAR offset. */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
lpar_offset = qto.tod_epoch_difference;
/* Call the TOD clock change notifier. */
atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}

/*
* Apply clock delta to the per-CPU data structures of this CPU.
* This is called for each online CPU after the call to clock_sync_global.
*/
static void clock_sync_local(unsigned long long delta)
{
/* Add the delta to the clock comparator. */
if (S390_lowcore.clock_comparator != -1ULL) {
S390_lowcore.clock_comparator += delta;
set_clock_comparator(S390_lowcore.clock_comparator);
}
/* Adjust the last_update_clock time-stamp. */
S390_lowcore.last_update_clock += delta;
}

/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;

@ -397,31 +456,9 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
unsigned long long fixup_cc;
unsigned long long clock_delta;
};

static void clock_sync_cpu(struct clock_sync_data *sync)
{
atomic_dec(&sync->cpus);
enable_sync_clock();
while (sync->in_sync == 0) {
__udelay(1);
/*
* A different cpu changes *in_sync. Therefore use
* barrier() to force memory access.
*/
barrier();
}
if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
/*
* This round of TOD syncing is done. Set the clock comparator
* to the next tick and let the processor continue.
*/
fixup_clock_comparator(sync->fixup_cc);
}

/*
* Server Time Protocol (STP) code.
*/
@ -523,54 +560,46 @@ void stp_queue_work(void)

static int stp_sync_clock(void *data)
{
static int first;
struct clock_sync_data *sync = data;
unsigned long long clock_delta;
struct clock_sync_data *stp_sync;
struct ptff_qto qto;
static int first;
int rc;

stp_sync = data;

if (xchg(&first, 1) == 1) {
/* Slave */
clock_sync_cpu(stp_sync);
return 0;
}

/* Wait until all other cpus entered the sync function. */
while (atomic_read(&stp_sync->cpus) != 0)
cpu_relax();

enable_sync_clock();

rc = 0;
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
if (rc == 0) {
/* fixup the monotonic sched clock */
sched_clock_base_cc += clock_delta;
if (ptff_query(PTFF_QTO) &&
ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
/* Update LPAR offset */
lpar_offset = qto.tod_epoch_difference;
atomic_notifier_call_chain(&s390_epoch_delta_notifier,
0, &clock_delta);
stp_sync->fixup_cc = clock_delta;
fixup_clock_comparator(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
if (xchg(&first, 1) == 0) {
/* Wait until all other cpus entered the sync function. */
while (atomic_read(&sync->cpus) != 0)
cpu_relax();
rc = 0;
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
&clock_delta);
if (rc == 0) {
sync->clock_delta = clock_delta;
clock_sync_global(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
}
}
sync->in_sync = rc ? -EAGAIN : 1;
xchg(&first, 0);
} else {
/* Slave */
atomic_dec(&sync->cpus);
/* Wait for in_sync to be set. */
while (READ_ONCE(sync->in_sync) == 0)
__udelay(1);
}
if (rc) {
if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
stp_sync->in_sync = -EAGAIN;
} else
stp_sync->in_sync = 1;
xchg(&first, 0);
/* Apply clock delta to per-CPU fields of this CPU. */
clock_sync_local(sync->clock_delta);

return 0;
}

@ -7,6 +7,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
@ -41,15 +42,17 @@ static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);

/*
* Socket/Book linked lists and per_cpu(cpu_topology) updates are
* Socket/Book linked lists and cpu_topology updates are
* protected by "sched_domains_mutex".
*/
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

cpumask_t cpus_with_topology;

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
@ -97,7 +100,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
if (lcpu < 0)
continue;
for (i = 0; i <= smp_cpu_mtid; i++) {
topo = &per_cpu(cpu_topology, lcpu + i);
topo = &cpu_topology[lcpu + i];
topo->drawer_id = drawer->id;
topo->book_id = book->id;
topo->socket_id = socket->id;
@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
cpumask_set_cpu(lcpu + i, &cpus_with_topology);
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
}
@ -220,7 +224,7 @@ static void update_cpu_masks(void)
int cpu;

for_each_possible_cpu(cpu) {
topo = &per_cpu(cpu_topology, cpu);
topo = &cpu_topology[cpu];
topo->thread_mask = cpu_thread_map(cpu);
topo->core_mask = cpu_group_map(&socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu);
@ -231,6 +235,8 @@ static void update_cpu_masks(void)
topo->socket_id = cpu;
topo->book_id = cpu;
topo->drawer_id = cpu;
if (cpu_present(cpu))
cpumask_set_cpu(cpu, &cpus_with_topology);
}
}
numa_update_cpu_topology();
@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
stsi(info, 15, 1, min(topology_max_mnest, 4));
}

int arch_update_cpu_topology(void)
static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
struct device *dev;
int cpu, rc = 0;
int rc = 0;

cpumask_clear(&cpus_with_topology);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
store_topology(info);
@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
return rc;
}

int arch_update_cpu_topology(void)
{
struct device *dev;
int cpu, rc;

rc = __arch_update_cpu_topology();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@ -394,23 +409,23 @@ int topology_cpu_init(struct cpu *cpu)

static const struct cpumask *cpu_thread_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).thread_mask;
return &cpu_topology[cpu].thread_mask;
}


const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).core_mask;
return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).book_mask;
return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).drawer_mask;
return &cpu_topology[cpu].drawer_mask;
}

static int __init early_parse_topology(char *p)
@ -438,19 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
mask = mask->next;
}
}

static int __init s390_topology_init(void)
void __init topology_init_early(void)
{
struct sysinfo_15_1_x *info;
int i;

set_sched_topology(s390_topology);
if (!MACHINE_HAS_TOPOLOGY)
return 0;
tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
goto out;
tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
@ -460,10 +476,9 @@ static int __init s390_topology_init(void)
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);
set_sched_topology(s390_topology);
return 0;
out:
__arch_update_cpu_topology();
}
early_initcall(s390_topology_init);

static int __init topology_init(void)
{

@ -99,8 +99,27 @@ __kernel_clock_gettime:
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
stcke 0(%r15) /* Store TOD clock */
lm %r0,%r1,1(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
s %r0,1(%r15) /* no - ts_steering_end */
sl %r1,5(%r15)
brc 3,22f
ahi %r0,-1
22: ltr %r0,%r0 /* past end of steering? */
jm 24f
srdl %r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 23f
lcr %r0,%r0 /* negative TOD offset */
lcr %r1,%r1
je 23f
ahi %r0,-1
23: a %r0,1(%r15) /* add TOD timestamp */
al %r1,5(%r15)
brc 12,25f
ahi %r0,1
j 25f
24: lm %r0,%r1,1(%r15) /* load TOD timestamp */
25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1

@ -31,8 +31,27 @@ __kernel_gettimeofday:
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
stcke 0(%r15) /* Store TOD clock */
lm %r0,%r1,1(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
s %r0,1(%r15)
sl %r1,5(%r15)
brc 3,14f
ahi %r0,-1
14: ltr %r0,%r0 /* past end of steering? */
jm 16f
srdl %r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 15f
lcr %r0,%r0 /* negative TOD offset */
lcr %r1,%r1
je 15f
ahi %r0,-1
15: a %r0,1(%r15) /* add TOD timestamp */
al %r1,5(%r15)
brc 12,17f
ahi %r0,1
j 17f
16: lm %r0,%r1,1(%r15) /* load TOD timestamp */
17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1

@ -83,8 +83,17 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stcke 0(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,1(%r15)
lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
slgr %r0,%r1 /* now - ts_steering_end */
ltgr %r0,%r0 /* past end of steering ? */
jm 17f
srlg %r0,%r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 18f
lcgr %r0,%r0 /* negative TOD offset */
18: algr %r1,%r0 /* add steering offset */
17: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */

@ -31,7 +31,16 @@ __kernel_gettimeofday:
jnz 0b
stcke 0(%r15) /* Store TOD clock */
lg %r1,1(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
slgr %r0,%r1 /* now - ts_steering_end */
ltgr %r0,%r0 /* past end of steering ? */
jm 6f
srlg %r0,%r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 7f
lcgr %r0,%r0 /* negative TOD offset */
7: algr %r1,%r0 /* add steering offset */
6: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
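The same steering fixup is duplicated in all four vdso fast paths so that userspace gettimeofday()/clock_gettime() agree with the kernel's read_tod_clock(); the 31-bit variants merely spell the 64-bit arithmetic as register pairs with explicit borrow and carry handling (the brc/ahi pairs). In C, the logic each file implements is roughly:

	/* sketch: ts_end and ts_dir mirror vdso_data->ts_end / ts_dir */
	static unsigned long long steer_tod(unsigned long long now,
					    unsigned long long ts_end,
					    int ts_dir)
	{
		long long adj = ts_end - now;

		if (adj < 0)		/* steering window already over */
			return now;
		/* hide or add one TOD unit per 2^15 elapsed units */
		return ts_dir ? now - (adj >> 15) : now + (adj >> 15);
	}

The tb_update_count sequence counter already present in the vdso data page keeps this consistent while clock_sync_global() rewrites the parameters.
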
@ -96,7 +96,6 @@ static void update_mt_scaling(void)
*/
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
u64 user_scaled, system_scaled;

@ -119,13 +118,13 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();

user = S390_lowcore.user_timer - ti->user_timer;
user = S390_lowcore.user_timer - tsk->thread.user_timer;
S390_lowcore.steal_timer -= user;
ti->user_timer = S390_lowcore.user_timer;
tsk->thread.user_timer = S390_lowcore.user_timer;

system = S390_lowcore.system_timer - ti->system_timer;
system = S390_lowcore.system_timer - tsk->thread.system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
tsk->thread.system_timer = S390_lowcore.system_timer;

user_scaled = user;
system_scaled = system;
@ -153,15 +152,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)

void vtime_task_switch(struct task_struct *prev)
{
struct thread_info *ti;

do_account_vtime(prev, 0);
ti = task_thread_info(prev);
ti->user_timer = S390_lowcore.user_timer;
ti->system_timer = S390_lowcore.system_timer;
ti = task_thread_info(current);
S390_lowcore.user_timer = ti->user_timer;
S390_lowcore.system_timer = ti->system_timer;
prev->thread.user_timer = S390_lowcore.user_timer;
prev->thread.system_timer = S390_lowcore.system_timer;
S390_lowcore.user_timer = current->thread.user_timer;
S390_lowcore.system_timer = current->thread.system_timer;
}

/*
@ -181,7 +176,6 @@ void vtime_account_user(struct task_struct *tsk)
*/
void vtime_account_irq_enter(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, system, system_scaled;

timer = S390_lowcore.last_update_timer;
@ -193,9 +187,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();

system = S390_lowcore.system_timer - ti->system_timer;
system = S390_lowcore.system_timer - tsk->thread.system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
tsk->thread.system_timer = S390_lowcore.system_timer;
system_scaled = system;
/* Do MT utilization scaling */
if (smp_cpu_mtid) {

@ -7,6 +7,45 @@
#include <linux/linkage.h>
#include <asm/export.h>

/*
* void *memmove(void *dest, const void *src, size_t n)
*/
ENTRY(memmove)
ltgr %r4,%r4
lgr %r1,%r2
bzr %r14
clgr %r2,%r3
jnh .Lmemmove_forward
la %r5,0(%r4,%r3)
clgr %r2,%r5
jl .Lmemmove_reverse
.Lmemmove_forward:
aghi %r4,-1
srlg %r0,%r4,8
ltgr %r0,%r0
jz .Lmemmove_rest
.Lmemmove_loop:
mvc 0(256,%r1),0(%r3)
la %r1,256(%r1)
la %r3,256(%r3)
brctg %r0,.Lmemmove_loop
.Lmemmove_rest:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
br %r14
.Lmemmove_reverse:
aghi %r4,-1
.Lmemmove_reverse_loop:
ic %r0,0(%r4,%r3)
stc %r0,0(%r4,%r1)
brctg %r4,.Lmemmove_reverse_loop
ic %r0,0(%r4,%r3)
stc %r0,0(%r4,%r1)
br %r14
.Lmemmove_mvc:
mvc 0(1,%r1),0(%r3)
EXPORT_SYMBOL(memmove)

/*
* memset implementation
*
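For readers who do not speak s390 assembler, the new memmove boils down to the classic overlap test: copy forward (in 256-byte mvc chunks plus an ex-executed remainder) unless dest lies inside [src, src + n), in which case copy byte-wise backwards. A C sketch of the same control flow:

	#include <stddef.h>

	void *memmove_sketch(void *dest, const void *src, size_t n)
	{
		unsigned char *d = dest;
		const unsigned char *s = src;

		if (!n)
			return dest;
		if (d <= s || d >= s + n) {
			/* no harmful overlap: forward copy (the mvc fast path) */
			while (n--)
				*d++ = *s++;
		} else {
			/* dest overlaps the tail of src: copy backwards */
			d += n;
			s += n;
			while (n--)
				*--d = *--s;
		}
		return dest;
	}

Having a real memmove in the architecture library is what allows early.c above to stop abusing the generic memcpy-based fallback for overlapping moves.
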
@ -733,6 +733,7 @@ block:
* return to userspace schedule() to block. */
__set_current_state(TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
set_preempt_need_resched();
}
}
out:

@ -34,7 +34,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)

if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
return alloc_bootmem_align(size, size);
return (void *) memblock_alloc(size, size);
}

static inline pud_t *vmem_pud_alloc(void)
@ -61,17 +61,16 @@ pmd_t *vmem_pmd_alloc(void)

pte_t __ref *vmem_pte_alloc(void)
{
unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
pte_t *pte;

if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
PTRS_PER_PTE * sizeof(pte_t));
pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
clear_table((unsigned long *) pte, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t));
clear_table((unsigned long *) pte, _PAGE_INVALID, size);
return pte;
}

@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
@ -307,13 +308,11 @@ fail:
/*
* Allocate and initialize core to node mapping
*/
static void create_core_to_node_map(void)
static void __ref create_core_to_node_map(void)
{
int i;

emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
if (emu_cores == NULL)
panic("Could not allocate cores to node memory");
emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
@ -354,13 +353,13 @@ static struct toptree *toptree_from_topology(void)

phys = toptree_new(TOPTREE_ID_PHYS, 1);

for_each_online_cpu(cpu) {
top = &per_cpu(cpu_topology, cpu);
for_each_cpu(cpu, &cpus_with_topology) {
top = &cpu_topology[cpu];
node = toptree_get_child(phys, 0);
drawer = toptree_get_child(node, top->drawer_id);
book = toptree_get_child(drawer, top->book_id);
mc = toptree_get_child(book, top->socket_id);
core = toptree_get_child(mc, top->core_id);
core = toptree_get_child(mc, smp_get_base_cpu(cpu));
if (!drawer || !book || !mc || !core)
panic("NUMA emulation could not allocate memory");
cpumask_set_cpu(cpu, &core->mask);
@ -378,7 +377,7 @@ static void topology_add_core(struct toptree *core)
int cpu;

for_each_cpu(cpu, &core->mask) {
top = &per_cpu(cpu_topology, cpu);
top = &cpu_topology[cpu];
cpumask_copy(&top->thread_mask, &core->mask);
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
cpumask_copy(&top->book_mask, &core_book(core)->mask);
@ -425,6 +424,27 @@ static void print_node_to_core_map(void)
}
}

static void pin_all_possible_cpus(void)
{
int core_id, node_id, cpu;
static int initialized;

if (initialized)
return;
print_node_to_core_map();
node_id = 0;
for_each_possible_cpu(cpu) {
core_id = smp_get_base_cpu(cpu);
if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
continue;
pin_core_to_node(core_id, node_id);
cpu_topology[cpu].node_id = node_id;
node_id = (node_id + 1) % emu_nodes;
}
print_node_to_core_map();
initialized = 1;
}

/*
* Transfer physical topology into a NUMA topology and modify CPU masks
* according to the NUMA topology.
@ -442,7 +462,7 @@ static void emu_update_cpu_topology(void)
toptree_free(phys);
toptree_to_topology(numa);
toptree_free(numa);
print_node_to_core_map();
pin_all_possible_cpus();
}

/*

@ -7,6 +7,7 @@
*/

#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/list_sort.h>
@ -25,10 +26,14 @@
* RETURNS:
* Pointer to the new tree node or NULL on error
*/
struct toptree *toptree_alloc(int level, int id)
struct toptree __ref *toptree_alloc(int level, int id)
{
struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
struct toptree *res;

if (slab_is_available())
res = kzalloc(sizeof(*res), GFP_KERNEL);
else
res = memblock_virt_alloc(sizeof(*res), 8);
if (!res)
return res;

@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
* cleanly using toptree_remove. Possible children are freed
* recursively. In the end @cand itself is freed.
*/
void toptree_free(struct toptree *cand)
void __ref toptree_free(struct toptree *cand)
{
struct toptree *child, *tmp;

@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
toptree_remove(cand);
toptree_for_each_child_safe(child, tmp, cand)
toptree_free(child);
kfree(cand);
if (slab_is_available())
kfree(cand);
else
memblock_free_early((unsigned long)cand, sizeof(*cand));
}

/**

@@ -722,6 +722,11 @@ struct dev_pm_ops pcibios_pm_ops = {
 
 static int zpci_alloc_domain(struct zpci_dev *zdev)
 {
+	if (zpci_unique_uid) {
+		zdev->domain = (u16) zdev->uid;
+		return 0;
+	}
+
 	spin_lock(&zpci_domain_lock);
 	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
 	if (zdev->domain == ZPCI_NR_DEVICES) {
@@ -735,6 +740,9 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
 
 static void zpci_free_domain(struct zpci_dev *zdev)
 {
+	if (zpci_unique_uid)
+		return;
+
 	spin_lock(&zpci_domain_lock);
 	clear_bit(zdev->domain, zpci_domain);
 	spin_unlock(&zpci_domain_lock);
@@ -22,6 +22,8 @@
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
+bool zpci_unique_uid;
+
 static inline void zpci_err_clp(unsigned int rsp, int rc)
 {
 	struct {
@@ -315,6 +317,7 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
 			goto out;
 		}
 
+		zpci_unique_uid = rrb->response.uid_checking;
 		WARN_ON_ONCE(rrb->response.entry_size !=
 			     sizeof(struct clp_fh_list_entry));
 
@@ -69,7 +69,7 @@ static void pci_sw_counter_show(struct seq_file *m)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
-		seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+		seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i],
 			   atomic64_read(counter));
 }
 
@@ -181,14 +181,17 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
 	/*
 	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
 	 * translations when previously invalid translation-table entries are
-	 * validated. With lazy unmap, it also is skipped for previously valid
+	 * validated. With lazy unmap, rpcit is skipped for previously valid
 	 * entries, but a global rpcit is then required before any address can
 	 * be re-used, i.e. after each iommu bitmap wrap-around.
 	 */
-	if (!zdev->tlb_refresh &&
-	    (!s390_iommu_strict ||
-	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
-		return 0;
+	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
+		if (!zdev->tlb_refresh)
+			return 0;
+	} else {
+		if (!s390_iommu_strict)
+			return 0;
+	}
 
 	return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
 				  PAGE_ALIGN(size));
@@ -257,7 +260,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
 	if (offset == -1) {
-		if (!zdev->tlb_refresh && !s390_iommu_strict) {
+		if (!s390_iommu_strict) {
 			/* global flush before DMA addresses are reused */
 			if (zpci_refresh_global(zdev))
 				goto out_error;
@@ -292,7 +295,7 @@ static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
 	if (!zdev->iommu_bitmap)
 		goto out;
 
-	if (zdev->tlb_refresh || s390_iommu_strict)
+	if (s390_iommu_strict)
 		bitmap_clear(zdev->iommu_bitmap, offset, size);
 	else
 		bitmap_set(zdev->lazy_bitmap, offset, size);
@@ -388,8 +391,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 		return NULL;
 
 	pa = page_to_phys(page);
-	memset((void *) pa, 0, size);
-
 	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
 	if (dma_mapping_error(dev, map)) {
 		free_pages(pa, get_order(size));
@@ -419,6 +420,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 			     size_t size, dma_addr_t *handle,
 			     enum dma_data_direction dir)
 {
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	dma_addr_t dma_addr_base, dma_addr;
 	int flags = ZPCI_PTE_VALID;
@@ -426,8 +428,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long pa = 0;
 	int ret;
 
-	size = PAGE_ALIGN(size);
-	dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+	dma_addr_base = dma_alloc_address(dev, nr_pages);
 	if (dma_addr_base == DMA_ERROR_CODE)
 		return -ENOMEM;
 
@@ -436,26 +437,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		flags |= ZPCI_TABLE_PROTECTED;
 
 	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
-		pa = page_to_phys(sg_page(s)) + s->offset;
-		ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+		pa = page_to_phys(sg_page(s));
+		ret = __dma_update_trans(zdev, pa, dma_addr,
+					 s->offset + s->length, flags);
 		if (ret)
 			goto unmap;
 
-		dma_addr += s->length;
+		dma_addr += s->offset + s->length;
 	}
 	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
 	if (ret)
 		goto unmap;
 
 	*handle = dma_addr_base;
-	atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+	atomic64_add(nr_pages, &zdev->mapped_pages);
 
 	return ret;
 
 unmap:
 	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
 			 ZPCI_PTE_INVALID);
-	dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+	dma_free_address(dev, dma_addr_base, nr_pages);
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
 	return ret;
@@ -564,7 +566,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 		rc = -ENOMEM;
 		goto free_dma_table;
 	}
-	if (!zdev->tlb_refresh && !s390_iommu_strict) {
+	if (!s390_iommu_strict) {
 		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
 		if (!zdev->lazy_bitmap) {
 			rc = -ENOMEM;
@@ -9,7 +9,5 @@ define filechk_facilities.h
 	$(obj)/gen_facilities
 endef
 
-$(obj)/gen_facilities.o: $(srctree)/arch/s390/tools/gen_facilities.c
-
 include/generated/facilities.h: $(obj)/gen_facilities FORCE
 	$(call filechk,facilities.h)
@@ -7,13 +7,83 @@
  *
  */
 
-#define S390_GEN_FACILITIES_C
-
 #include <strings.h>
 #include <string.h>
 #include <stdlib.h>
 #include <stdio.h>
-#include <asm/facilities_src.h>
 
+struct facility_def {
+	char *name;
+	int *bits;
+};
+
+static struct facility_def facility_defs[] = {
+	{
+		/*
+		 * FACILITIES_ALS contains the list of facilities that are
+		 * required to run a kernel that is compiled e.g. with
+		 * -march=<machine>.
+		 */
+		.name = "FACILITIES_ALS",
+		.bits = (int[]){
+#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
+			0, /* N3 instructions */
+			1, /* z/Arch mode installed */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+			18, /* long displacement facility */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+			7, /* stfle */
+			17, /* message security assist */
+			21, /* extended-immediate facility */
+			25, /* store clock fast */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+			27, /* mvcos */
+			32, /* compare and swap and store */
+			33, /* compare and swap and store 2 */
+			34, /* general extension facility */
+			35, /* execute extensions */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+			45, /* fast-BCR, etc. */
+#endif
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+			49, /* misc-instruction-extensions */
+			52, /* interlocked facility 2 */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+			53, /* load-and-zero-rightmost-byte, etc. */
+#endif
+			-1 /* END */
+		}
+	},
+	{
+		.name = "FACILITIES_KVM",
+		.bits = (int[]){
+			0, /* N3 instructions */
+			1, /* z/Arch mode installed */
+			2, /* z/Arch mode active */
+			3, /* DAT-enhancement */
+			4, /* idte segment table */
+			5, /* idte region table */
+			6, /* ASN-and-LX reuse */
+			7, /* stfle */
+			8, /* enhanced-DAT 1 */
+			9, /* sense-running-status */
+			10, /* conditional sske */
+			13, /* ipte-range */
+			14, /* nonquiescing key-setting */
+			73, /* transactional execution */
+			75, /* access-exception-fetch/store indication */
+			76, /* msa extension 3 */
+			77, /* msa extension 4 */
+			78, /* enhanced-DAT 2 */
+			-1 /* END */
+		}
+	},
+};
+
 static void print_facility_list(struct facility_def *def)
 {
@@ -5,12 +5,13 @@
  *
  * Author(s):
  *   Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * License: GPL
  */
 
 #define KMSG_COMPONENT "zpci"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
@@ -21,10 +22,6 @@
 #define SLOT_NAME_SIZE 10
 static LIST_HEAD(s390_hotplug_slot_list);
 
-MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
-MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
-MODULE_LICENSE("GPL");
-
 static int zpci_fn_configured(enum zpci_state state)
 {
 	return state == ZPCI_FN_STATE_CONFIGURED ||
@@ -69,6 +69,7 @@ static void dasd_block_tasklet(struct dasd_block *);
 static void do_kick_device(struct work_struct *);
 static void do_restore_device(struct work_struct *);
 static void do_reload_device(struct work_struct *);
+static void do_requeue_requests(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 static void dasd_device_timeout(unsigned long);
 static void dasd_block_timeout(unsigned long);
@@ -125,6 +126,7 @@ struct dasd_device *dasd_alloc_device(void)
 	INIT_WORK(&device->kick_work, do_kick_device);
 	INIT_WORK(&device->restore_device, do_restore_device);
 	INIT_WORK(&device->reload_device, do_reload_device);
+	INIT_WORK(&device->requeue_requests, do_requeue_requests);
 	device->state = DASD_STATE_NEW;
 	device->target = DASD_STATE_NEW;
 	mutex_init(&device->state_mutex);
@@ -1448,9 +1450,9 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 	cqr->starttime = jiffies;
 	cqr->retries--;
 	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
-		cqr->lpm &= device->path_data.opm;
+		cqr->lpm &= dasd_path_get_opm(device);
 		if (!cqr->lpm)
-			cqr->lpm = device->path_data.opm;
+			cqr->lpm = dasd_path_get_opm(device);
 	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
@@ -1483,8 +1485,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 			DBF_DEV_EVENT(DBF_WARNING, device,
 				      "start_IO: selected paths gone (%x)",
 				      cqr->lpm);
-		} else if (cqr->lpm != device->path_data.opm) {
-			cqr->lpm = device->path_data.opm;
+		} else if (cqr->lpm != dasd_path_get_opm(device)) {
+			cqr->lpm = dasd_path_get_opm(device);
 			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
 				      "start_IO: selected paths gone,"
 				      " retry on all paths");
@@ -1493,11 +1495,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 				      "start_IO: all paths in opm gone,"
 				      " do path verification");
 			dasd_generic_last_path_gone(device);
-			device->path_data.opm = 0;
-			device->path_data.ppm = 0;
-			device->path_data.npm = 0;
-			device->path_data.tbvpm =
-				ccw_device_get_path_mask(device->cdev);
+			dasd_path_no_path(device);
+			dasd_path_set_tbvpm(device,
+					    ccw_device_get_path_mask(
+						    device->cdev));
 		}
 		break;
 	case -ENODEV:
@@ -1623,6 +1624,13 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
 }
 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
 
+static int dasd_check_hpf_error(struct irb *irb)
+{
+	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
+		(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
+		 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
+}
+
 /*
  * Interrupt handler for "normal" ssch-io based dasd devices.
  */
@@ -1642,7 +1650,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		switch (PTR_ERR(irb)) {
 		case -EIO:
 			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
-				device = (struct dasd_device *) cqr->startdev;
+				device = cqr->startdev;
 				cqr->status = DASD_CQR_CLEARED;
 				dasd_device_clear_timer(device);
 				wake_up(&dasd_flush_wq);
@@ -1749,19 +1757,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 					  struct dasd_ccw_req, devlist);
 		}
 	} else {  /* error */
+		/* check for HPF error
+		 * call discipline function to requeue all requests
+		 * and disable HPF accordingly
+		 */
+		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
+		    device->discipline->handle_hpf_error)
+			device->discipline->handle_hpf_error(device, irb);
 		/*
 		 * If we don't want complex ERP for this request, then just
 		 * reset this and retry it in the fastpath
 		 */
 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 		    cqr->retries > 0) {
-			if (cqr->lpm == device->path_data.opm)
+			if (cqr->lpm == dasd_path_get_opm(device))
 				DBF_DEV_EVENT(DBF_DEBUG, device,
 					      "default ERP in fastpath "
 					      "(%i retries left)",
 					      cqr->retries);
 			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
-				cqr->lpm = device->path_data.opm;
+				cqr->lpm = dasd_path_get_opm(device);
 			cqr->status = DASD_CQR_QUEUED;
 			next = cqr;
 		} else
@@ -2002,17 +2017,18 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
 {
 	int rc;
 
-	if (device->path_data.tbvpm) {
-		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
-					DASD_UNRESUMED_PM))
-			return;
-		rc = device->discipline->verify_path(
-			device, device->path_data.tbvpm);
-		if (rc)
-			dasd_device_set_timer(device, 50);
-		else
-			device->path_data.tbvpm = 0;
-	}
+	if (!dasd_path_get_tbvpm(device))
+		return;
+
+	if (device->stopped &
+	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+		return;
+	rc = device->discipline->verify_path(device,
+					     dasd_path_get_tbvpm(device));
+	if (rc)
+		dasd_device_set_timer(device, 50);
+	else
+		dasd_path_clear_all_verify(device);
 };
 
 /*
@@ -2924,10 +2940,10 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
 
 	if (!block)
 		return -EINVAL;
-	spin_lock_irqsave(&block->queue_lock, flags);
+	spin_lock_irqsave(&block->request_queue_lock, flags);
 	req = (struct request *) cqr->callback_data;
 	blk_requeue_request(block->request_queue, req);
-	spin_unlock_irqrestore(&block->queue_lock, flags);
+	spin_unlock_irqrestore(&block->request_queue_lock, flags);
 
 	return 0;
 }
@@ -3121,6 +3137,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
  */
 static void dasd_setup_queue(struct dasd_block *block)
 {
+	struct request_queue *q = block->request_queue;
 	int max;
 
 	if (block->base->features & DASD_FEATURE_USERAW) {
@@ -3135,17 +3152,16 @@ static void dasd_setup_queue(struct dasd_block *block)
 	} else {
 		max = block->base->discipline->max_blocks << block->s2b_shift;
 	}
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
-	block->request_queue->limits.max_dev_sectors = max;
-	blk_queue_logical_block_size(block->request_queue,
-				     block->bp_block);
-	blk_queue_max_hw_sectors(block->request_queue, max);
-	blk_queue_max_segments(block->request_queue, -1L);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	q->limits.max_dev_sectors = max;
+	blk_queue_logical_block_size(q, block->bp_block);
+	blk_queue_max_hw_sectors(q, max);
+	blk_queue_max_segments(q, USHRT_MAX);
 	/* with page sized segments we can translate each segement into
 	 * one idaw/tidaw
 	 */
-	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
-	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(q, PAGE_SIZE);
+	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 }
 
 /*
@@ -3517,11 +3533,15 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	struct dasd_device *device;
 	struct dasd_block *block;
 	int max_count, open_count, rc;
+	unsigned long flags;
 
 	rc = 0;
-	device = dasd_device_from_cdev(cdev);
-	if (IS_ERR(device))
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device)) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 		return PTR_ERR(device);
+	}
 
 	/*
 	 * We must make sure that this device is currently not in use.
@@ -3540,8 +3560,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 			pr_warn("%s: The DASD cannot be set offline while it is in use\n",
 				dev_name(&cdev->dev));
 			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
-			dasd_put_device(device);
-			return -EBUSY;
+			goto out_busy;
 		}
 	}
 
@@ -3551,19 +3570,19 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 		 * could only be called by normal offline so safe_offline flag
 		 * needs to be removed to run normal offline and kill all I/O
 		 */
-		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags))
 			/* Already doing normal offline processing */
-			dasd_put_device(device);
-			return -EBUSY;
-		} else
+			goto out_busy;
+		else
 			clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-
-	} else
-		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+	} else {
+		if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
 			/* Already doing offline processing */
-			dasd_put_device(device);
-			return -EBUSY;
-		}
+			goto out_busy;
+	}
+
+	set_bit(DASD_FLAG_OFFLINE, &device->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
 	/*
 	 * if safe_offline called set safe_offline_running flag and
@@ -3591,7 +3610,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 		goto interrupted;
 	}
 
-	set_bit(DASD_FLAG_OFFLINE, &device->flags);
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
 	block = device->block;
@@ -3610,7 +3628,14 @@ interrupted:
 	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
 	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
 	dasd_put_device(device);
+
 	return rc;
+
+out_busy:
+	dasd_put_device(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	return -EBUSY;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
 
@@ -3675,14 +3700,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 	case CIO_GONE:
 	case CIO_BOXED:
 	case CIO_NO_PATH:
-		device->path_data.opm = 0;
-		device->path_data.ppm = 0;
-		device->path_data.npm = 0;
+		dasd_path_no_path(device);
 		ret = dasd_generic_last_path_gone(device);
 		break;
 	case CIO_OPER:
 		ret = 1;
-		if (device->path_data.opm)
+		if (dasd_path_get_opm(device))
 			ret = dasd_generic_path_operational(device);
 		break;
 	}
@@ -3693,48 +3716,32 @@ EXPORT_SYMBOL_GPL(dasd_generic_notify);
 
 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 {
-	int chp;
-	__u8 oldopm, eventlpm;
 	struct dasd_device *device;
+	int chp, oldopm, hpfpm, ifccpm;
 
 	device = dasd_device_from_cdev_locked(cdev);
 	if (IS_ERR(device))
 		return;
 
+	oldopm = dasd_path_get_opm(device);
 	for (chp = 0; chp < 8; chp++) {
-		eventlpm = 0x80 >> chp;
 		if (path_event[chp] & PE_PATH_GONE) {
-			oldopm = device->path_data.opm;
-			device->path_data.opm &= ~eventlpm;
-			device->path_data.ppm &= ~eventlpm;
-			device->path_data.npm &= ~eventlpm;
-			if (oldopm && !device->path_data.opm) {
-				dev_warn(&device->cdev->dev,
-					 "No verified channel paths remain "
-					 "for the device\n");
-				DBF_DEV_EVENT(DBF_WARNING, device,
-					      "%s", "last verified path gone");
-				dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-				dasd_device_set_stop_bits(device,
-							  DASD_STOPPED_DC_WAIT);
-			}
+			dasd_path_notoper(device, chp);
 		}
 		if (path_event[chp] & PE_PATH_AVAILABLE) {
-			device->path_data.opm &= ~eventlpm;
-			device->path_data.ppm &= ~eventlpm;
-			device->path_data.npm &= ~eventlpm;
-			device->path_data.tbvpm |= eventlpm;
+			dasd_path_available(device, chp);
 			dasd_schedule_device_bh(device);
 		}
 		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
-			if (!(device->path_data.opm & eventlpm) &&
-			    !(device->path_data.tbvpm & eventlpm)) {
+			if (!dasd_path_is_operational(device, chp) &&
+			    !dasd_path_need_verify(device, chp)) {
				/*
				 * we can not establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first
				 */
-				device->path_data.tbvpm |= eventlpm;
-				dasd_schedule_device_bh(device);
+				dasd_path_available(device, chp);
+				dasd_schedule_device_bh(device);
 			}
 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 				      "Pathgroup re-established\n");
@@ -3742,28 +3749,154 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 			device->discipline->kick_validate(device);
 		}
 	}
+	hpfpm = dasd_path_get_hpfpm(device);
+	ifccpm = dasd_path_get_ifccpm(device);
+	if (!dasd_path_get_opm(device) && hpfpm) {
+		/*
+		 * device has no operational paths but at least one path is
+		 * disabled due to HPF errors
+		 * disable HPF at all and use the path(s) again
+		 */
+		if (device->discipline->disable_hpf)
+			device->discipline->disable_hpf(device);
+		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+		dasd_path_set_tbvpm(device, hpfpm);
+		dasd_schedule_device_bh(device);
+		dasd_schedule_requeue(device);
+	} else if (!dasd_path_get_opm(device) && ifccpm) {
+		/*
+		 * device has no operational paths but at least one path is
+		 * disabled due to IFCC errors
+		 * trigger path verification on paths with IFCC errors
+		 */
+		dasd_path_set_tbvpm(device, ifccpm);
+		dasd_schedule_device_bh(device);
+	}
+	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
+		dev_warn(&device->cdev->dev,
+			 "No verified channel paths remain for the device\n");
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			      "%s", "last verified path gone");
+		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+		dasd_device_set_stop_bits(device,
+					  DASD_STOPPED_DC_WAIT);
+	}
 	dasd_put_device(device);
 }
 EXPORT_SYMBOL_GPL(dasd_generic_path_event);
 
 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
 {
-	if (!device->path_data.opm && lpm) {
-		device->path_data.opm = lpm;
+	if (!dasd_path_get_opm(device) && lpm) {
+		dasd_path_set_opm(device, lpm);
 		dasd_generic_path_operational(device);
 	} else
-		device->path_data.opm |= lpm;
+		dasd_path_add_opm(device, lpm);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
 
+/*
+ * clear active requests and requeue them to block layer if possible
+ */
+static int dasd_generic_requeue_all_requests(struct dasd_device *device)
+{
+	struct list_head requeue_queue;
+	struct dasd_ccw_req *cqr, *n;
+	struct dasd_ccw_req *refers;
+	int rc;
+
+	INIT_LIST_HEAD(&requeue_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+		/* Check status and move request to flush_queue */
+		if (cqr->status == DASD_CQR_IN_IO) {
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate requeust */
+				dev_err(&device->cdev->dev,
+					"Unable to terminate request %p "
+					"on suspend\n", cqr);
+				spin_unlock_irq(get_ccwdev_lock(device->cdev));
+				dasd_put_device(device);
+				return rc;
+			}
+		}
+		list_move_tail(&cqr->devlist, &requeue_queue);
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+
+		/* mark sleepon requests as ended */
+		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
+			cqr->callback_data = DASD_SLEEPON_END_TAG;
+
+		/* remove requests from device and block queue */
+		list_del_init(&cqr->devlist);
+		while (cqr->refers != NULL) {
+			refers = cqr->refers;
+			/* remove the request from the block queue */
+			list_del(&cqr->blocklist);
+			/* free the finished erp request */
+			dasd_free_erp_request(cqr, cqr->memdev);
+			cqr = refers;
+		}
+
+		/*
+		 * requeue requests to blocklayer will only work
+		 * for block device requests
+		 */
+		if (_dasd_requeue_request(cqr))
+			continue;
+
+		if (cqr->block)
+			list_del_init(&cqr->blocklist);
+		cqr->block->base->discipline->free_cp(
+			cqr, (struct request *) cqr->callback_data);
+	}
+
+	/*
+	 * if requests remain then they are internal request
+	 * and go back to the device queue
+	 */
+	if (!list_empty(&requeue_queue)) {
+		/* move freeze_queue to start of the ccw_queue */
+		spin_lock_irq(get_ccwdev_lock(device->cdev));
+		list_splice_tail(&requeue_queue, &device->ccw_queue);
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	}
+	/* wake up generic waitqueue for eventually ended sleepon requests */
+	wake_up(&generic_waitq);
+	return rc;
+}
+
+static void do_requeue_requests(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  requeue_requests);
+	dasd_generic_requeue_all_requests(device);
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	dasd_put_device(device);
+}
+
+void dasd_schedule_requeue(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_reload_device to the kernel event daemon. */
+	if (!schedule_work(&device->requeue_requests))
+		dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_schedule_requeue);
+
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
 	struct dasd_device *device = dasd_device_from_cdev(cdev);
-	struct list_head freeze_queue;
-	struct dasd_ccw_req *cqr, *n;
-	struct dasd_ccw_req *refers;
-	int rc;
 
 	if (IS_ERR(device))
@@ -3778,67 +3911,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 	/* disallow new I/O */
 	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
 
-	/* clear active requests and requeue them to block layer if possible */
-	INIT_LIST_HEAD(&freeze_queue);
-	spin_lock_irq(get_ccwdev_lock(cdev));
-	rc = 0;
-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
-		/* Check status and move request to flush_queue */
-		if (cqr->status == DASD_CQR_IN_IO) {
-			rc = device->discipline->term_IO(cqr);
-			if (rc) {
-				/* unable to terminate requeust */
-				dev_err(&device->cdev->dev,
-					"Unable to terminate request %p "
-					"on suspend\n", cqr);
-				spin_unlock_irq(get_ccwdev_lock(cdev));
-				dasd_put_device(device);
-				return rc;
-			}
-		}
-		list_move_tail(&cqr->devlist, &freeze_queue);
-	}
-	spin_unlock_irq(get_ccwdev_lock(cdev));
-
-	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
-		wait_event(dasd_flush_wq,
-			   (cqr->status != DASD_CQR_CLEAR_PENDING));
-		if (cqr->status == DASD_CQR_CLEARED)
-			cqr->status = DASD_CQR_QUEUED;
-
-		/* requeue requests to blocklayer will only work for
-		   block device requests */
-		if (_dasd_requeue_request(cqr))
-			continue;
-
-		/* remove requests from device and block queue */
-		list_del_init(&cqr->devlist);
-		while (cqr->refers != NULL) {
-			refers = cqr->refers;
-			/* remove the request from the block queue */
-			list_del(&cqr->blocklist);
-			/* free the finished erp request */
-			dasd_free_erp_request(cqr, cqr->memdev);
-			cqr = refers;
-		}
-		if (cqr->block)
-			list_del_init(&cqr->blocklist);
-		cqr->block->base->discipline->free_cp(
-			cqr, (struct request *) cqr->callback_data);
-	}
-
-	/*
-	 * if requests remain then they are internal request
-	 * and go back to the device queue
-	 */
-	if (!list_empty(&freeze_queue)) {
-		/* move freeze_queue to start of the ccw_queue */
-		spin_lock_irq(get_ccwdev_lock(cdev));
-		list_splice_tail(&freeze_queue, &device->ccw_queue);
-		spin_unlock_irq(get_ccwdev_lock(cdev));
-	}
-	dasd_put_device(device);
-	return rc;
+	return dasd_generic_requeue_all_requests(device);
 }
 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
 
@@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
 	opm = ccw_device_get_path_mask(device->cdev);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	if (erp->lpm == 0)
-		erp->lpm = device->path_data.opm &
+		erp->lpm = dasd_path_get_opm(device) &
 			~(erp->irb.esw.esw0.sublog.lpum);
 	else
 		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
@@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
 	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 		erp->status = DASD_CQR_FILLED;
 		erp->retries = 10;
-		erp->lpm = erp->startdev->path_data.opm;
+		erp->lpm = dasd_path_get_opm(erp->startdev);
 		erp->function = dasd_3990_erp_action_1_sec;
 	}
 	return erp;
@@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 		    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 			/* reset the lpm and the status to be able to
 			 * try further actions. */
-			erp->lpm = erp->startdev->path_data.opm;
+			erp->lpm = dasd_path_get_opm(erp->startdev);
 			erp->status = DASD_CQR_NEED_ERP;
 		}
 	}
@@ -2208,6 +2208,51 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
 
 }				/* end dasd_3990_erp_inspect_32 */
 
+static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
+{
+	int pos = pathmask_to_pos(lpum);
+
+	/* no remaining path, cannot disable */
+	if (!(dasd_path_get_opm(device) & ~lpum))
+		return;
+
+	dev_err(&device->cdev->dev,
+		"Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
+		device->path[pos].cssid, device->path[pos].chpid, lpum);
+	dasd_path_remove_opm(device, lpum);
+	dasd_path_add_ifccpm(device, lpum);
+	device->path[pos].errorclk = 0;
+	atomic_set(&device->path[pos].error_count, 0);
+}
+
+static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
+{
+	struct dasd_device *device = erp->startdev;
+	__u8 lpum = erp->refers->irb.esw.esw1.lpum;
+	int pos = pathmask_to_pos(lpum);
+	unsigned long long clk;
+
+	if (!device->path_thrhld)
+		return;
+
+	clk = get_tod_clock();
+	/*
+	 * check if the last error is longer ago than the timeout,
+	 * if so reset error state
+	 */
+	if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
+	    >= device->path_interval) {
+		atomic_set(&device->path[pos].error_count, 0);
+		device->path[pos].errorclk = 0;
+	}
+	atomic_inc(&device->path[pos].error_count);
+	device->path[pos].errorclk = clk;
+	/* threshold exceeded disable path if possible */
+	if (atomic_read(&device->path[pos].error_count) >=
+	    device->path_thrhld)
+		dasd_3990_erp_disable_path(device, lpum);
+}
+
 /*
  *****************************************************************************
  * main ERP control functions (24 and 32 byte sense)
@@ -2237,6 +2282,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
 			   | SCHN_STAT_CHN_CTRL_CHK)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "channel or interface control check");
+		dasd_3990_erp_account_error(erp);
 		erp = dasd_3990_erp_action_4(erp, NULL);
 	}
 	return erp;
@@ -725,27 +725,15 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
 static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val;
-	char *endp;
+	unsigned int val;
+	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_FAILFAST;
-	else
-		devmap->features &= ~DASD_FEATURE_FAILFAST;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	return count;
+	rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
@@ -771,32 +759,41 @@ static ssize_t
 dasd_ro_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
+	struct ccw_device *cdev = to_ccwdev(dev);
 	struct dasd_device *device;
-	int val;
-	char *endp;
+	unsigned long flags;
+	unsigned int val;
+	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_READONLY;
-	else
-		devmap->features &= ~DASD_FEATURE_READONLY;
-	device = devmap->device;
-	if (device) {
-		device->features = devmap->features;
-		val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+	rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
+	if (rc)
+		return rc;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+	if (!device->block || !device->block->gdp ||
+	    test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		goto out;
 	}
-	spin_unlock(&dasd_devmap_lock);
-	if (device && device->block && device->block->gdp)
-		set_disk_ro(device->block->gdp, val);
+	/* Increase open_count to avoid losing the block device */
+	atomic_inc(&device->block->open_count);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	set_disk_ro(device->block->gdp, val);
+	atomic_dec(&device->block->open_count);
+
+out:
+	dasd_put_device(device);
+
 	return count;
 }
 
@@ -823,27 +820,15 @@ static ssize_t
 dasd_erplog_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val;
-	char *endp;
+	unsigned int val;
+	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_ERPLOG;
-	else
-		devmap->features &= ~DASD_FEATURE_ERPLOG;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	return count;
+	rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
@@ -871,16 +856,14 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
 		   const char *buf, size_t count)
 {
 	struct dasd_devmap *devmap;
+	unsigned int val;
 	ssize_t rc;
-	int val;
-	char *endp;
 
 	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
 	spin_lock(&dasd_devmap_lock);
@@ -994,10 +977,12 @@ dasd_access_show(struct device *dev, struct device_attribute *attr,
 	if (IS_ERR(device))
 		return PTR_ERR(device);
 
-	if (device->discipline->host_access_count)
-		count = device->discipline->host_access_count(device);
-	else
+	if (!device->discipline)
+		count = -ENODEV;
+	else if (!device->discipline->host_access_count)
 		count = -EOPNOTSUPP;
+	else
+		count = device->discipline->host_access_count(device);
 
 	dasd_put_device(device);
 	if (count < 0)
@@ -1197,27 +1182,25 @@ static ssize_t
 dasd_eer_store(struct device *dev, struct device_attribute *attr,
 	       const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val, rc;
-	char *endp;
+	struct dasd_device *device;
+	unsigned int val;
+	int rc = 0;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-	if (!devmap->device)
-		return -ENODEV;
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return PTR_ERR(device);
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	if (val) {
-		rc = dasd_eer_enable(devmap->device);
-		if (rc)
-			return rc;
-	} else
-		dasd_eer_disable(devmap->device);
-	return count;
+	if (val)
+		rc = dasd_eer_enable(device);
+	else
+		dasd_eer_disable(device);
+
+	dasd_put_device(device);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
@@ -1360,6 +1343,50 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(timeout, 0644,
 		   dasd_timeout_show, dasd_timeout_store);
 
+
+static ssize_t
+dasd_path_reset_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned int val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff)
+		val = 0;
+
+	if (device->discipline && device->discipline->reset_path)
+		device->discipline->reset_path(device, (__u8) val);
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store);
+
+static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct dasd_device *device;
+	int hpf;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (!device->discipline || !device->discipline->hpf_enabled) {
+		dasd_put_device(device);
+		return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+	}
+	hpf = device->discipline->hpf_enabled(device);
+	dasd_put_device(device);
+	return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+}
+
+static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
+
 static ssize_t dasd_reservation_policy_show(struct device *dev,
 					    struct device_attribute *attr,
 					    char *buf)
@@ -1385,27 +1412,17 @@ static ssize_t dasd_reservation_policy_store(struct device *dev,
 					     struct device_attribute *attr,
 					     const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
+	struct ccw_device *cdev = to_ccwdev(dev);
 	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-	rc = 0;
-	spin_lock(&dasd_devmap_lock);
 	if (sysfs_streq("ignore", buf))
-		devmap->features &= ~DASD_FEATURE_FAILONSLCK;
+		rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0);
 	else if (sysfs_streq("fail", buf))
-		devmap->features |= DASD_FEATURE_FAILONSLCK;
+		rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1);
 	else
 		rc = -EINVAL;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	if (rc)
-		return rc;
-	else
-		return count;
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(reservation_policy, 0644,
@@ -1461,25 +1478,120 @@ static ssize_t dasd_pm_show(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct dasd_device *device;
-	u8 opm, nppm, cablepm, cuirpm, hpfpm;
+	u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm;
 
 	device = dasd_device_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(device))
 		return sprintf(buf, "0\n");
 
-	opm = device->path_data.opm;
-	nppm = device->path_data.npm;
-	cablepm = device->path_data.cablepm;
-	cuirpm = device->path_data.cuirpm;
-	hpfpm = device->path_data.hpfpm;
+	opm = dasd_path_get_opm(device);
+	nppm = dasd_path_get_nppm(device);
+	cablepm = dasd_path_get_cablepm(device);
+	cuirpm = dasd_path_get_cuirpm(device);
+	hpfpm = dasd_path_get_hpfpm(device);
+	ifccpm = dasd_path_get_ifccpm(device);
 	dasd_put_device(device);
 
-	return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
-		       cablepm, cuirpm, hpfpm);
+	return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+		       cablepm, cuirpm, hpfpm, ifccpm);
 }
 
 static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
 
+/*
+ * threshold value for IFCC/CCC errors
+ */
+static ssize_t
+dasd_path_threshold_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_THRHLD_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	if (val)
+		device->path_thrhld = val;
+	spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
+		   dasd_path_threshold_store);
+/*
+ * interval for IFCC/CCC checks
+ * meaning time with no IFCC/CCC error before the error counter
+ * gets reset
+ */
+static ssize_t
+dasd_path_interval_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_INTERVAL_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	if (val)
+		device->path_interval = val;
+	spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
+		   dasd_path_interval_store);
+
+
 static struct attribute * dasd_attrs[] = {
 	&dev_attr_readonly.attr,
 	&dev_attr_discipline.attr,
@@ -1500,6 +1612,10 @@ static struct attribute * dasd_attrs[] = {
 	&dev_attr_safe_offline.attr,
 	&dev_attr_host_access_count.attr,
 	&dev_attr_path_masks.attr,
+	&dev_attr_path_threshold.attr,
+	&dev_attr_path_interval.attr,
+	&dev_attr_path_reset.attr,
+	&dev_attr_hpf.attr,
 	NULL,
 };
 
@@ -1531,7 +1647,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
 {
 	struct dasd_devmap *devmap;
 
-	devmap = dasd_find_busid(dev_name(&cdev->dev));
+	devmap = dasd_devmap_from_cdev(cdev);
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
@ -1042,8 +1042,11 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
|
|||
private->conf_data = NULL;
|
||||
private->conf_len = 0;
|
||||
for (i = 0; i < 8; i++) {
|
||||
kfree(private->path_conf_data[i]);
|
||||
private->path_conf_data[i] = NULL;
|
||||
kfree(device->path[i].conf_data);
|
||||
device->path[i].conf_data = NULL;
|
||||
device->path[i].cssid = 0;
|
||||
device->path[i].ssid = 0;
|
||||
device->path[i].chpid = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1055,13 +1058,14 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
|
|||
int rc, path_err, pos;
|
||||
__u8 lpm, opm;
|
||||
struct dasd_eckd_private *private, path_private;
|
||||
struct dasd_path *path_data;
|
||||
struct dasd_uid *uid;
|
||||
char print_path_uid[60], print_device_uid[60];
|
||||
struct channel_path_desc *chp_desc;
|
||||
struct subchannel_id sch_id;
|
||||
|
||||
private = device->private;
|
||||
path_data = &device->path_data;
|
||||
opm = ccw_device_get_path_mask(device->cdev);
|
||||
ccw_device_get_schid(device->cdev, &sch_id);
|
||||
conf_data_saved = 0;
|
||||
path_err = 0;
|
||||
/* get configuration data per operational path */
|
||||
|
@ -1081,7 +1085,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
|
|||
"No configuration data "
|
||||
"retrieved");
|
||||
/* no further analysis possible */
|
||||
path_data->opm |= lpm;
|
||||
dasd_path_add_opm(device, opm);
|
||||
continue; /* no error */
|
||||
}
|
||||
/* save first valid configuration data */
|
||||
|
@ -1098,8 +1102,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
|
|||
}
|
||||
pos = pathmask_to_pos(lpm);
|
||||
/* store per path conf_data */
|
||||
private->path_conf_data[pos] =
|
||||
(struct dasd_conf_data *) conf_data;
|
||||
device->path[pos].conf_data = conf_data;
|
||||
device->path[pos].cssid = sch_id.cssid;
|
||||
device->path[pos].ssid = sch_id.ssid;
|
||||
chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
|
||||
if (chp_desc)
|
||||
device->path[pos].chpid = chp_desc->chpid;
|
||||
kfree(chp_desc);
|
||||
/*
|
||||
* build device UID that other path data
|
||||
* can be compared to it
|
||||
|
@ -1154,42 +1163,66 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
|
|||
"device %s instead of %s\n", lpm,
|
||||
print_path_uid, print_device_uid);
|
||||
path_err = -EINVAL;
|
||||
path_data->cablepm |= lpm;
|
||||
dasd_path_add_cablepm(device, lpm);
|
||||
continue;
|
||||
}
|
||||
pos = pathmask_to_pos(lpm);
|
||||
/* store per path conf_data */
|
||||
private->path_conf_data[pos] =
|
||||
(struct dasd_conf_data *) conf_data;
|
||||
device->path[pos].conf_data = conf_data;
|
||||
device->path[pos].cssid = sch_id.cssid;
|
||||
device->path[pos].ssid = sch_id.ssid;
|
||||
chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
|
||||
if (chp_desc)
|
||||
device->path[pos].chpid = chp_desc->chpid;
|
||||
kfree(chp_desc);
|
||||
path_private.conf_data = NULL;
|
||||
path_private.conf_len = 0;
|
||||
}
|
||||
switch (dasd_eckd_path_access(conf_data, conf_len)) {
|
||||
case 0x02:
|
||||
path_data->npm |= lpm;
|
||||
dasd_path_add_nppm(device, lpm);
|
||||
break;
|
||||
case 0x03:
|
||||
path_data->ppm |= lpm;
|
||||
dasd_path_add_ppm(device, lpm);
|
||||
break;
|
||||
}
|
||||
if (!path_data->opm) {
|
||||
path_data->opm = lpm;
|
||||
if (!dasd_path_get_opm(device)) {
|
||||
dasd_path_set_opm(device, lpm);
|
||||
dasd_generic_path_operational(device);
|
||||
} else {
|
||||
path_data->opm |= lpm;
|
||||
dasd_path_add_opm(device, lpm);
|
||||
}
|
||||
/*
|
||||
* if the path is used
|
||||
* it should not be in one of the negative lists
|
||||
*/
|
||||
path_data->cablepm &= ~lpm;
|
||||
path_data->hpfpm &= ~lpm;
|
||||
path_data->cuirpm &= ~lpm;
|
||||
}
|
||||
|
||||
return path_err;
|
||||
}
|
||||
|
||||
static u32 get_fcx_max_data(struct dasd_device *device)
|
||||
{
|
||||
struct dasd_eckd_private *private = device->private;
|
||||
int fcx_in_css, fcx_in_gneq, fcx_in_features;
|
||||
int tpm, mdc;
|
||||
|
||||
if (dasd_nofcx)
|
||||
return 0;
|
||||
/* is transport mode supported? */
|
||||
fcx_in_css = css_general_characteristics.fcx;
|
||||
fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
|
||||
fcx_in_features = private->features.feature[40] & 0x80;
|
||||
tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
|
||||
|
||||
if (!tpm)
|
||||
return 0;
|
||||
|
||||
mdc = ccw_device_get_mdc(device->cdev, 0);
|
||||
if (mdc < 0) {
|
||||
dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
|
||||
return 0;
|
||||
} else {
|
||||
return (u32)mdc * FCX_MAX_DATA_FACTOR;
|
||||
}
|
||||
}
|
||||
|
||||
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
|
||||
{
|
||||
struct dasd_eckd_private *private = device->private;
|
||||
|
@ -1222,8 +1255,7 @@ static int rebuild_device_uid(struct dasd_device *device,
|
|||
struct path_verification_work_data *data)
|
||||
{
|
||||
struct dasd_eckd_private *private = device->private;
|
||||
struct dasd_path *path_data = &device->path_data;
|
||||
__u8 lpm, opm = path_data->opm;
|
||||
__u8 lpm, opm = dasd_path_get_opm(device);
|
||||
int rc = -ENODEV;
|
||||
|
||||
for (lpm = 0x80; lpm; lpm >>= 1) {
|
||||
|
@ -1356,7 +1388,7 @@ static void do_path_verification_work(struct work_struct *work)
|
|||
* in other case the device UID may have changed and
|
||||
* the first working path UID will be used as device UID
|
||||
*/
|
||||
if (device->path_data.opm &&
|
||||
if (dasd_path_get_opm(device) &&
|
||||
dasd_eckd_compare_path_uid(device, &path_private)) {
|
||||
/*
|
||||
* the comparison was not successful
|
||||
|
@ -1406,23 +1438,17 @@ static void do_path_verification_work(struct work_struct *work)
|
|||
* situation in dasd_start_IO.
|
||||
*/
|
||||
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
|
||||
if (!device->path_data.opm && opm) {
|
||||
device->path_data.opm = opm;
|
||||
device->path_data.cablepm &= ~opm;
|
||||
device->path_data.cuirpm &= ~opm;
|
||||
device->path_data.hpfpm &= ~opm;
|
||||
if (!dasd_path_get_opm(device) && opm) {
|
||||
dasd_path_set_opm(device, opm);
|
||||
dasd_generic_path_operational(device);
|
||||
} else {
|
||||
device->path_data.opm |= opm;
|
||||
device->path_data.cablepm &= ~opm;
|
||||
device->path_data.cuirpm &= ~opm;
|
||||
device->path_data.hpfpm &= ~opm;
|
||||
dasd_path_add_opm(device, opm);
|
||||
}
|
||||
device->path_data.npm |= npm;
|
||||
device->path_data.ppm |= ppm;
|
||||
device->path_data.tbvpm |= epm;
|
||||
device->path_data.cablepm |= cablepm;
|
||||
device->path_data.hpfpm |= hpfpm;
|
||||
dasd_path_add_nppm(device, npm);
|
||||
dasd_path_add_ppm(device, ppm);
|
||||
dasd_path_add_tbvpm(device, epm);
|
||||
dasd_path_add_cablepm(device, cablepm);
|
||||
dasd_path_add_nohpfpm(device, hpfpm);
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
|
||||
}
|
||||
clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
|
||||
|
@@ -1456,6 +1482,19 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
	return 0;
}

static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

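dasd_eckd_reset_path() uses the binary form of the conditional operator, a GNU C extension: "a ?: b" evaluates a once and yields b only when a is zero, so an empty path mask falls back to all not-operational paths. A small demonstration with hypothetical mask values (compiles with GCC or Clang):

#include <stdio.h>

int main(void)
{
        unsigned char pm = 0x00;                /* no specific path requested */
        unsigned char notoperpm = 0x28;         /* hypothetical fallback mask */

        unsigned char tbvpm = pm ? : notoperpm; /* GNU "Elvis" operator */

        printf("to-be-verified mask: %02x\n", tbvpm);   /* 28 */
        return 0;
}
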
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

@@ -1652,32 +1691,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
	dasd_put_device(device);
}

static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	int tpm, mdc;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc < 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
			 " data size for zHPF requests failed\n");
		return 0;
	} else
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
}

/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.

@@ -1729,10 +1742,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
	if (rc)
		goto out_err1;

	/* set default timeout */
	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	/* set default retry count */
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;

	if (private->gneq) {
		value = 1;

@@ -1839,13 +1853,16 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
	private->gneq = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(private->path_conf_data[i]);
		if ((__u8 *)private->path_conf_data[i] ==
		kfree(device->path[i].conf_data);
		if ((__u8 *)device->path[i].conf_data ==
		    private->conf_data) {
			private->conf_data = NULL;
			private->conf_len = 0;
		}
		private->path_conf_data[i] = NULL;
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
	kfree(private->conf_data);
	private->conf_data = NULL;

@@ -2966,7 +2983,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
	if (cqr->block && (cqr->startdev != cqr->block->base)) {
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
		cqr->lpm = cqr->block->base->path_data.opm;
		cqr->lpm = dasd_path_get_opm(cqr->block->base);
	}
};

@@ -3251,7 +3268,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = startdev->path_data.ppm;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

@@ -3426,7 +3443,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = startdev->path_data.ppm;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

@@ -3735,7 +3752,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = startdev->path_data.ppm;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

@@ -3962,7 +3979,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = startdev->path_data.ppm;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

@@ -4783,7 +4800,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
		      req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		      scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		      irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
		      irb->scsw.tm.fcxs,
		      (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		      req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",

@@ -5306,11 +5324,10 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id,
			    struct channel_path_desc *desc,
			    struct subchannel_id sch_id)
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

@@ -5328,11 +5345,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	if (desc)
		psf_cuir->chpid = desc->chpid;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = sch_id.cssid;
	psf_cuir->ssid = sch_id.ssid;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;

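The CUIR response above now looks up cssid/ssid/chpid in the per-path array, indexed through pathmask_to_pos(). A stand-alone model of that mask-to-index mapping, assuming the driver's MSB-first convention where lpum 0x80 names path 0:

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* mirrors the dasd helper: single-bit lpum -> index into path[8] */
static int pathmask_to_pos(unsigned char mask)
{
        return 8 - ffs(mask);
}

int main(void)
{
        printf("lpum 0x80 -> pos %d\n", pathmask_to_pos(0x80)); /* 0 */
        printf("lpum 0x01 -> pos %d\n", pathmask_to_pos(0x01)); /* 7 */
        return 0;
}
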
@@ -5363,20 +5379,19 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = private->path_conf_data[pos];
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return private->path_conf_data[pathmask_to_pos(lpum)];
	return device->path[pathmask_to_pos(lpum)].conf_data;
}

/*

@@ -5391,7 +5406,6 @@ out:
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;

@@ -5417,11 +5431,10 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0x80; path; path >>= 1) {
	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		pos = pathmask_to_pos(path);
		conf_data = private->path_conf_data[pos];
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */

@@ -5442,33 +5455,29 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
			continue;
		/* device and path match the reference values
		   add path to CUIR scope */
		tbcpm |= path;
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}

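The scope loop above switches from walking a one-hot mask to walking array indices; both forms build the same tbcpm, since 0x80 >> path regenerates the bit for a given position. A small equivalence check with a hypothetical match predicate standing in for the NED/NEQ comparison:

#include <stdio.h>

static int path_matches(int pos)        /* hypothetical predicate */
{
        return pos == 1 || pos == 4;
}

int main(void)
{
        unsigned char tbcpm_mask = 0, tbcpm_idx = 0;
        int path, pos;

        /* old style: walk a one-hot mask */
        for (path = 0x80, pos = 0; path; path >>= 1, pos++)
                if (path_matches(pos))
                        tbcpm_mask |= path;

        /* new style: walk array indices, regenerate the bit */
        for (path = 0; path < 8; path++)
                if (path_matches(path))
                        tbcpm_idx |= 0x80 >> path;

        printf("%02x == %02x\n", tbcpm_mask, tbcpm_idx);        /* 48 == 48 */
        return 0;
}
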
static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
				       unsigned long paths,
				       struct subchannel_id sch_id, int action)
				       unsigned long paths, int action)
{
	struct channel_path_desc *desc;
	int pos;

	while (paths) {
		/* get position of bit in mask */
		pos = ffs(paths) - 1;
		pos = 8 - ffs(paths);
		/* get channel path descriptor from this position */
		desc = ccw_device_get_chp_desc(device->cdev, 7 - pos);
		if (action == CUIR_QUIESCE)
			pr_warn("Service on the storage server caused path "
				"%x.%02x to go offline", sch_id.cssid,
				desc ? desc->chpid : 0);
			pr_warn("Service on the storage server caused path %x.%02x to go offline",
				device->path[pos].cssid,
				device->path[pos].chpid);
		else if (action == CUIR_RESUME)
			pr_info("Path %x.%02x is back online after service "
				"on the storage server", sch_id.cssid,
				desc ? desc->chpid : 0);
		kfree(desc);
		clear_bit(pos, &paths);
			pr_info("Path %x.%02x is back online after service on the storage server",
				device->path[pos].cssid,
				device->path[pos].chpid);
		clear_bit(7 - pos, &paths);
	}
}

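The bit gymnastics above reconcile two numbering schemes: ffs() counts from the least significant bit, while channel paths are numbered from the most significant one, hence pos = 8 - ffs(paths) and the mirrored clear_bit(7 - pos, ...). A stand-alone model with a hypothetical path set:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned long paths = 0x90;     /* hypothetical: paths 0 and 3 affected */
        int pos;

        while (paths) {
                pos = 8 - ffs(paths);           /* MSB-first path position */
                printf("notify path %d\n", pos);
                /* same effect as clear_bit(7 - pos, &paths) single-threaded */
                paths &= ~(1UL << (7 - pos));
        }
        return 0;
}
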
@@ -5479,16 +5488,16 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,

	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
	/* nothing to do if path is not in use */
	if (!(device->path_data.opm & tbcpm))
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;
	if (!(device->path_data.opm & ~tbcpm)) {
	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken
		   return error */
		return -EINVAL;
	}
	/* remove device from operational path mask */
	device->path_data.opm &= ~tbcpm;
	device->path_data.cuirpm |= tbcpm;
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}

@@ -5501,7 +5510,6 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
 * notify the already set offline devices again
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct subchannel_id sch_id,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;

@@ -5556,14 +5564,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE);
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	return tbcpm;
}

static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct subchannel_id sch_id,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;

@@ -5581,8 +5588,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
			    alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dev->path_data.opm & tbcpm)) {
			dev->path_data.tbvpm |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}

@@ -5591,8 +5598,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
			    alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dev->path_data.opm & tbcpm)) {
			dev->path_data.tbvpm |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}

@@ -5605,8 +5612,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
			    alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dev->path_data.opm & tbcpm)) {
			dev->path_data.tbvpm |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}

@@ -5615,14 +5622,14 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
			    alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dev->path_data.opm & tbcpm)) {
			dev->path_data.tbvpm |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME);
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}

@@ -5630,38 +5637,31 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	struct channel_path_desc *desc;
	struct subchannel_id sch_id;
	int pos, response;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);
	ccw_device_get_schid(device->cdev, &sch_id);
	pos = pathmask_to_pos(lpum);
	desc = ccw_device_get_chp_desc(device->cdev, pos);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce */
		if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir))
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, sch_id, cuir);
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, desc, sch_id);
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* free descriptor copy */
	kfree(desc);
	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}

@@ -5708,6 +5708,63 @@ static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
	return 0;
}

static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	if (~lpum & dasd_path_get_opm(device)) {
		dasd_path_add_nohpfpm(device, lpum);
		dasd_path_remove_opm(device, lpum);
		dev_err(&device->cdev->dev,
			"Channel path %02X lost HPF functionality and is disabled\n",
			lpum);
		return 1;
	}
	return 0;
}

static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}

static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}

static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent that any new I/O is started on the device and schedule a
	 * requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}

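handle_hpf_error above distinguishes a device-wide loss of High Performance FICON from a single-path loss via the sesq code in the SCSW. A compact model of that decision flow, with hypothetical constants standing in for the kernel's sense codes:

#include <stdio.h>

enum { SESQ_DEV_NOFCX = 1, SESQ_PATH_NOFCX = 2 };       /* hypothetical values */

static void handle_hpf_error(int sesq, int hpf_active)
{
        if (!hpf_active) {
                printf("spurious HPF error, ignored\n");
                return;
        }
        if (sesq == SESQ_DEV_NOFCX)
                printf("disable HPF for the whole device\n");
        else if (sesq == SESQ_PATH_NOFCX)
                printf("disable HPF on the failing path first\n");
        printf("stop new I/O and requeue pending requests\n");
}

int main(void)
{
        handle_hpf_error(SESQ_PATH_NOFCX, 1);
        return 0;
}
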
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",

@@ -5776,6 +5833,10 @@ static struct dasd_discipline dasd_eckd_discipline = {
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
};

static int __init

@@ -94,6 +94,8 @@
#define FCX_MAX_DATA_FACTOR 65536
#define DASD_ECKD_RCD_DATA_SIZE 256

#define DASD_ECKD_PATH_THRHLD		 256
#define DASD_ECKD_PATH_INTERVAL		 300

/*****************************************************************************
 * SECTION: Type Definitions

@@ -535,8 +537,7 @@ struct dasd_eckd_private {
	struct dasd_eckd_characteristics rdc_data;
	u8 *conf_data;
	int conf_len;
	/* per path configuration data */
	struct dasd_conf_data *path_conf_data[8];

	/* pointers to specific parts in the conf_data */
	struct dasd_ned *ned;
	struct dasd_sneq *sneq;

@@ -454,20 +454,30 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
 */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct dasd_ccw_req *cqr = NULL;
	unsigned long flags;
	struct ccw1 *ccw;
	int rc = 0;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr)
		return 0;
		goto out;
	else if (!device->discipline ||
		 strcmp(device->discipline->name, "ECKD"))
		rc = -EMEDIUMTYPE;
	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
		rc = -EBUSY;

	if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
		return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
	if (rc)
		goto out;

	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
				   SNSS_DATA_SIZE, device);
	if (IS_ERR(cqr))
		return -ENOMEM;
	if (IS_ERR(cqr)) {
		rc = -ENOMEM;
		cqr = NULL;
		goto out;
	}

	cqr->startdev = device;
	cqr->retries = 255;

@@ -485,15 +495,18 @@ int dasd_eer_enable(struct dasd_device *device)
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;
	}

out:
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	if (cqr)
		dasd_kfree_request(cqr, device);
	return 0;

	return rc;
}

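The dasd_eer_enable rework funnels every exit through a single unlock and frees the request only when it was not installed. A minimal stand-alone sketch of that goto-out pattern, with a pthread mutex standing in for the ccwdev spinlock and malloc for the request allocation:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *installed;                 /* models device->eer_cqr */

static int enable(void)
{
        void *buf = NULL;
        int rc = 0;

        pthread_mutex_lock(&lock);
        if (installed)
                goto out;               /* already enabled: rc stays 0 */
        buf = malloc(64);
        if (!buf) {
                rc = -1;
                goto out;
        }
        installed = buf;
        buf = NULL;                     /* ownership transferred */
out:
        pthread_mutex_unlock(&lock);
        free(buf);                      /* only freed if not installed */
        return rc;
}

int main(void)
{
        int first = enable(), second = enable();

        printf("first: %d, second: %d\n", first, second);       /* 0, 0 */
        return 0;
}
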
/*

@@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
			    "default ERP called (%i retries left)",
			    cqr->retries);
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
			cqr->lpm = device->path_data.opm;
			cqr->lpm = dasd_path_get_opm(device);
		cqr->status = DASD_CQR_FILLED;
	} else {
		pr_err("%s: default ERP has run out of retries and failed\n",

@@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)

	device->default_expires = DASD_EXPIRES;
	device->default_retries = FBA_DEFAULT_RETRIES;
	device->path_data.opm = LPM_ANYPATH;
	dasd_path_set_opm(device, LPM_ANYPATH);

	readonly = dasd_device_is_ro(device);
	if (readonly)

@@ -55,6 +55,7 @@

#include <asm/debug.h>
#include <asm/dasd.h>
#include <asm/idals.h>
#include <linux/bitops.h>

/* DASD discipline magic */
#define DASD_ECKD_MAGIC 0xC5C3D2C4

@@ -377,6 +378,10 @@ struct dasd_discipline {
	int (*check_attention)(struct dasd_device *, __u8);
	int (*host_access_count)(struct dasd_device *);
	int (*hosts_print)(struct dasd_device *, struct seq_file *);
	void (*handle_hpf_error)(struct dasd_device *, struct irb *);
	void (*disable_hpf)(struct dasd_device *);
	int (*hpf_enabled)(struct dasd_device *);
	void (*reset_path)(struct dasd_device *, __u8);
};

extern struct dasd_discipline *dasd_diag_discipline_pointer;

@@ -397,17 +402,31 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
#define DASD_EER_STATECHANGE 3
#define DASD_EER_PPRCSUSPEND 4

/* DASD path handling */

#define DASD_PATH_OPERATIONAL	1
#define DASD_PATH_TBV		2
#define DASD_PATH_PP		3
#define DASD_PATH_NPP		4
#define DASD_PATH_MISCABLED	5
#define DASD_PATH_NOHPF		6
#define DASD_PATH_CUIR		7
#define DASD_PATH_IFCC		8

#define DASD_THRHLD_MAX		4294967295U
#define DASD_INTERVAL_MAX	4294967295U

struct dasd_path {
	__u8 opm;
	__u8 tbvpm;
	__u8 ppm;
	__u8 npm;
	/* paths that are not used because of a special condition */
	__u8 cablepm; /* miss-cabled */
	__u8 hpfpm;   /* the HPF requirements of the other paths are not met */
	__u8 cuirpm;  /* CUIR varied offline */
	unsigned long flags;
	u8 cssid;
	u8 ssid;
	u8 chpid;
	struct dasd_conf_data *conf_data;
	atomic_t error_count;
	unsigned long long errorclk;
};

struct dasd_profile_info {
	/* legacy part of profile data, as in dasd_profile_info_t */
	unsigned int dasd_io_reqs;	 /* number of requests processed */

@@ -458,7 +477,8 @@ struct dasd_device {
	struct dasd_discipline *discipline;
	struct dasd_discipline *base_discipline;
	void *private;
	struct dasd_path path_data;
	struct dasd_path path[8];
	__u8 opm;

	/* Device state and target state. */
	int state, target;

@@ -483,6 +503,7 @@ struct dasd_device {
	struct work_struct reload_device;
	struct work_struct kick_validate;
	struct work_struct suc_work;
	struct work_struct requeue_requests;
	struct timer_list timer;

	debug_info_t *debug_area;

@@ -498,6 +519,9 @@ struct dasd_device {

	unsigned long blk_timeout;

	unsigned long path_thrhld;
	unsigned long path_interval;

	struct dentry *debugfs_dentry;
	struct dentry *hosts_dentry;
	struct dasd_profile profile;

@@ -707,6 +731,7 @@ void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
void dasd_reload_device(struct dasd_device *);
void dasd_schedule_requeue(struct dasd_device *);

void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);

@@ -835,4 +860,410 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
#define dasd_eer_enabled(d)	(0)
#endif	/* CONFIG_DASD_ERR */

/* DASD path handling functions */

/*
 * helper functions to modify bit masks for a given channel path for a device
 */
static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
}

static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
}

static inline void dasd_path_verify(struct dasd_device *device, int chp)
{
	__set_bit(DASD_PATH_TBV, &device->path[chp].flags);
}

static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
{
	__clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
}

static inline void dasd_path_clear_all_verify(struct dasd_device *device)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		dasd_path_clear_verify(device, chp);
}

static inline void dasd_path_operational(struct dasd_device *device, int chp)
{
	__set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
	device->opm |= (0x80 >> chp);
}

static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
{
	__set_bit(DASD_PATH_NPP, &device->path[chp].flags);
}

static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
}

static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
						int chp)
{
	__clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
}

static inline void dasd_path_preferred(struct dasd_device *device, int chp)
{
	__set_bit(DASD_PATH_PP, &device->path[chp].flags);
}

static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_PP, &device->path[chp].flags);
}

static inline void dasd_path_clear_preferred(struct dasd_device *device,
					     int chp)
{
	__clear_bit(DASD_PATH_PP, &device->path[chp].flags);
}

static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
{
	__clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
	device->opm &= ~(0x80 >> chp);
}

static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
{
	__clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
}

static inline void dasd_path_cuir(struct dasd_device *device, int chp)
{
	__set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
}

static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
}

static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
{
	__clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
}

static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
{
	set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
}

static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
}

static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
{
	clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
}

static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
{
	__clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
}

static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
{
	__set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
}

static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
}

static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
{
	__set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
}

static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
}

/*
 * get functions for path masks
 * will return a path mask for the given device
 */

static inline __u8 dasd_path_get_opm(struct dasd_device *device)
{
	return device->opm;
}

static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
{
	int chp;
	__u8 tbvpm = 0x00;

	for (chp = 0; chp < 8; chp++)
		if (dasd_path_need_verify(device, chp))
			tbvpm |= 0x80 >> chp;
	return tbvpm;
}

static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
{
	int chp;
	__u8 npm = 0x00;

	for (chp = 0; chp < 8; chp++) {
		if (dasd_path_is_nonpreferred(device, chp))
			npm |= 0x80 >> chp;
	}
	return npm;
}

static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
{
	int chp;
	__u8 ppm = 0x00;

	for (chp = 0; chp < 8; chp++)
		if (dasd_path_is_preferred(device, chp))
			ppm |= 0x80 >> chp;
	return ppm;
}

static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
{
	int chp;
	__u8 cablepm = 0x00;

	for (chp = 0; chp < 8; chp++)
		if (dasd_path_is_miscabled(device, chp))
			cablepm |= 0x80 >> chp;
	return cablepm;
}

static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
{
	int chp;
	__u8 cuirpm = 0x00;

	for (chp = 0; chp < 8; chp++)
		if (dasd_path_is_cuir(device, chp))
			cuirpm |= 0x80 >> chp;
	return cuirpm;
}

static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
{
	int chp;
	__u8 ifccpm = 0x00;

	for (chp = 0; chp < 8; chp++)
		if (dasd_path_is_ifcc(device, chp))
			ifccpm |= 0x80 >> chp;
	return ifccpm;
}

static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
{
	int chp;
	__u8 hpfpm = 0x00;

	for (chp = 0; chp < 8; chp++)
		if (dasd_path_is_nohpf(device, chp))
			hpfpm |= 0x80 >> chp;
	return hpfpm;
}

/*
 * add functions for path masks
 * the existing path mask will be extended by the given path mask
 */
static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_verify(device, chp);
}

static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
{
	int chp;
	__u8 nopm = 0x00;

	for (chp = 0; chp < 8; chp++)
		if (dasd_path_is_nohpf(device, chp) ||
		    dasd_path_is_ifcc(device, chp) ||
		    dasd_path_is_cuir(device, chp) ||
		    dasd_path_is_miscabled(device, chp))
			nopm |= 0x80 >> chp;
	return nopm;
}

static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp)) {
			dasd_path_operational(device, chp);
			/*
			 * if the path is used
			 * it should not be in one of the negative lists
			 */
			dasd_path_clear_nohpf(device, chp);
			dasd_path_clear_cuir(device, chp);
			dasd_path_clear_cable(device, chp);
			dasd_path_clear_ifcc(device, chp);
		}
}

static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_miscabled(device, chp);
}

static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_cuir(device, chp);
}

static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_ifcc(device, chp);
}

static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_nonpreferred(device, chp);
}

static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_nohpf(device, chp);
}

static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_preferred(device, chp);
}

/*
 * set functions for path masks
 * the existing path mask will be replaced by the given path mask
 */
static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_verify(device, chp);
		else
			dasd_path_clear_verify(device, chp);
}

static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++) {
		dasd_path_clear_oper(device, chp);
		if (pm & (0x80 >> chp)) {
			dasd_path_operational(device, chp);
			/*
			 * if the path is used
			 * it should not be in one of the negative lists
			 */
			dasd_path_clear_nohpf(device, chp);
			dasd_path_clear_cuir(device, chp);
			dasd_path_clear_cable(device, chp);
			dasd_path_clear_ifcc(device, chp);
		}
	}
}

/*
 * remove functions for path masks
 * the existing path mask will be cleared with the given path mask
 */
static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++) {
		if (pm & (0x80 >> chp))
			dasd_path_clear_oper(device, chp);
	}
}

/*
 * add the newly available path to the to be verified pm and remove it from
 * normal operation until it is verified
 */
static inline void dasd_path_available(struct dasd_device *device, int chp)
{
	dasd_path_clear_oper(device, chp);
	dasd_path_verify(device, chp);
}

static inline void dasd_path_notoper(struct dasd_device *device, int chp)
{
	dasd_path_clear_oper(device, chp);
	dasd_path_clear_preferred(device, chp);
	dasd_path_clear_nonpreferred(device, chp);
}

/*
 * remove all paths from normal operation
 */
static inline void dasd_path_no_path(struct dasd_device *device)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		dasd_path_notoper(device, chp);

	dasd_path_clear_all_verify(device);
}

/* end - path handling */

#endif /* DASD_H */

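The helper family above keeps the per-path state in flag words and rebuilds the legacy __u8 masks on demand. A stand-alone model of that round trip, assuming the 0x80 >> chp convention; the flag value and struct are simplified stand-ins:

#include <stdio.h>

#define PATH_TBV (1UL << 2)     /* models DASD_PATH_TBV */

struct path_model { unsigned long flags; };

int main(void)
{
        struct path_model path[8] = { { 0 } };
        unsigned char pm = 0x84, tbvpm = 0;     /* mark paths 0 and 5 */
        int chp;

        /* set: distribute mask bits onto per-path flags */
        for (chp = 0; chp < 8; chp++)
                if (pm & (0x80 >> chp))
                        path[chp].flags |= PATH_TBV;

        /* get: rebuild the mask from the flags */
        for (chp = 0; chp < 8; chp++)
                if (path[chp].flags & PATH_TBV)
                        tbvpm |= 0x80 >> chp;

        printf("round-trip: %02x -> %02x\n", pm, tbvpm);        /* 84 -> 84 */
        return 0;
}
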
@@ -9,7 +9,6 @@
 *	      Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>

@@ -1215,13 +1214,4 @@ static int __init tty3215_init(void)
	tty3215_driver = driver;
	return 0;
}

static void __exit tty3215_exit(void)
{
	tty_unregister_driver(tty3215_driver);
	put_tty_driver(tty3215_driver);
	ccw_driver_unregister(&raw3215_ccw_driver);
}

module_init(tty3215_init);
module_exit(tty3215_exit);
device_initcall(tty3215_init);

@@ -59,6 +59,7 @@

typedef unsigned int sclp_cmdw_t;

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_READ_EVENT_DATA	0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005

@@ -102,6 +103,28 @@ struct init_sccb {
	sccb_mask_t sclp_send_mask;
} __attribute__((packed));

struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

static inline void sclp_fill_core_info(struct sclp_core_info *info,
				       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	memcpy(&info->core, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_core_entry));
}

#define SCLP_HAS_CHP_INFO	(sclp.facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG	(sclp.facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO	(sclp.facilities & 0x0800000000000000ULL)

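The read_cpu_info SCCB above locates the CPU entries through a self-described byte offset rather than a fixed field position. A minimal stand-alone model of parsing such a layout, with a simplified header and 4-byte entries standing in for struct sclp_core_entry:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct sccb_model {                     /* simplified, hypothetical layout */
        uint16_t nr_configured;
        uint16_t offset_configured;
        uint8_t  raw[64];
};

int main(void)
{
        struct sccb_model sccb = { .nr_configured = 2, .offset_configured = 8 };
        uint32_t entries[2], src[2] = { 0x11111111, 0x22222222 };

        /* entries live "offset" bytes from the start of the block */
        memcpy((uint8_t *)&sccb + sccb.offset_configured, src, sizeof(src));
        memcpy(entries, (uint8_t *)&sccb + sccb.offset_configured,
               sccb.nr_configured * sizeof(entries[0]));

        printf("%08x %08x\n", entries[0], entries[1]);
        return 0;
}
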
@@ -80,33 +80,10 @@ out:
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

static void sclp_fill_core_info(struct sclp_core_info *info,
				struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	memcpy(&info->core, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_core_entry));
}

int sclp_get_core_info(struct sclp_core_info *info)
int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

@@ -221,6 +221,36 @@ static int __init sclp_set_event_mask(struct init_sccb *sccb,
	return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}

static struct sclp_core_info sclp_core_info_early __initdata;
static int sclp_core_info_early_valid __initdata;

static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb)
{
	int rc;

	if (!SCLP_HAS_CPU_INFO)
		return;
	memset(sccb, 0, sizeof(*sccb));
	sccb->header.length = sizeof(*sccb);
	do {
		rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
	} while (rc == -EBUSY);
	if (rc)
		return;
	if (sccb->header.response_code != 0x0010)
		return;
	sclp_fill_core_info(&sclp_core_info_early, sccb);
	sclp_core_info_early_valid = 1;
}

int __init _sclp_get_core_info_early(struct sclp_core_info *info)
{
	if (!sclp_core_info_early_valid)
		return -EIO;
	*info = sclp_core_info_early;
	return 0;
}

static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
	sccb_init_eq_size(sccb);

@@ -293,6 +323,7 @@ void __init sclp_early_detect(void)
	void *sccb = &sccb_early;

	sclp_facilities_detect(sccb);
	sclp_init_core_info_early(sccb);
	sclp_hsa_size_detect(sccb);

	/* Turn off SCLP event notifications. Also save remote masks in the

@@ -6,7 +6,6 @@
 *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

@@ -80,5 +79,4 @@ static int __init sclp_quiesce_init(void)
{
	return sclp_register(&sclp_quiesce_event);
}

module_init(sclp_quiesce_init);
device_initcall(sclp_quiesce_init);

@@ -7,7 +7,6 @@
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

@@ -573,4 +572,4 @@ sclp_tty_init(void)
	sclp_tty_driver = driver;
	return 0;
}
module_init(sclp_tty_init);
device_initcall(sclp_tty_init);

@@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;

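The vmlogrdr fix above requests the IUCV buffer from ZONE_DMA; on s390 that zone covers the memory below 2 GB, which 31-bit IUCV buffer addresses can reach. A minimal sketch of the allocation pattern, assuming a kernel context with <linux/gfp.h> and <linux/mm.h>:

/* a minimal sketch, not the driver's exact code */
static char *alloc_iucv_buffer(void)
{
        /* GFP_DMA keeps the page in ZONE_DMA (below 2 GB on s390),
         * within reach of 31-bit IUCV buffer addresses; the page
         * comes back zeroed */
        return (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
}
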
@@ -7,6 +7,7 @@
 *
 * Copyright IBM Corp. 2003, 2008
 * Author(s): Michael Holzheu
 * License: GPL
 */

#define KMSG_COMPONENT "zdump"

@@ -16,7 +17,6 @@
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/memblock.h>

#include <asm/asm-offsets.h>

@@ -320,7 +320,7 @@ static int __init zcore_init(void)
		goto fail;
	}

	pr_alert("DETECTED 'S390X (64 bit) OS'\n");
	pr_alert("The dump process started for a 64-bit operating system\n");
	rc = init_cpu_info();
	if (rc)
		goto fail;

@@ -364,22 +364,4 @@ fail:
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}

static void __exit zcore_exit(void)
{
	debug_unregister(zcore_dbf);
	sclp_sdias_exit();
	free_page((unsigned long) ipl_block);
	debugfs_remove(zcore_hsa_file);
	debugfs_remove(zcore_reipl_file);
	debugfs_remove(zcore_memmap_file);
	debugfs_remove(zcore_dir);
	diag308(DIAG308_REL_HSA, NULL);
}

MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");

subsys_initcall(zcore_init);
module_exit(zcore_exit);

@@ -30,7 +30,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/timex.h>	/* get_tod_clock() */

@@ -1389,13 +1389,7 @@ static int __init init_cmf(void)
		"%s (mode %s)\n", format_string, detect_string);
	return 0;
}
module_init(init_cmf);

MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
		   "Copyright IBM Corp. 2003\n");
device_initcall(init_cmf);

EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);

@@ -5,12 +5,14 @@
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *
 * License: GPL
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>

@@ -1285,5 +1287,3 @@ void css_driver_unregister(struct css_driver *cdrv)
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");

@@ -5,12 +5,14 @@
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * License: GPL
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

@@ -145,7 +147,6 @@ static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);

static int io_subchannel_prepare(struct subchannel *sch)
{

@@ -2150,7 +2151,6 @@ int ccw_device_siosl(struct ccw_device *cdev)
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);

@@ -13,7 +13,6 @@
 */
enum dev_state {
	DEV_STATE_NOT_OPER,
	DEV_STATE_SENSE_PGID,
	DEV_STATE_SENSE_ID,
	DEV_STATE_OFFLINE,
	DEV_STATE_VERIFY,

@@ -1058,12 +1058,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,

@@ -3,8 +3,10 @@
 *
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *
 * License: GPL
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

@@ -676,7 +678,6 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);

@@ -1273,7 +1273,7 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
	return retval;
}

static int ap_dev_suspend(struct device *dev, pm_message_t state)
static int ap_dev_suspend(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

@@ -1287,11 +1287,6 @@ static int ap_dev_suspend(struct device *dev, pm_message_t state)
	return 0;
}

static int ap_dev_resume(struct device *dev)
{
	return 0;
}

static void ap_bus_suspend(void)
{
	ap_suspend_flag = 1;

@@ -1356,12 +1351,13 @@ static struct notifier_block ap_power_notifier = {
	.notifier_call = ap_power_event,
};

static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL);

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_dev_suspend,
	.resume = ap_dev_resume,
	.pm = &ap_bus_pm_ops,
};

void ap_device_init_reply(struct ap_device *ap_dev,

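The ap bus hunk drops the legacy bus-level suspend/resume callbacks, which took a pm_message_t, in favor of a struct dev_pm_ops table. SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) builds such a table, wiring the pair into the system-sleep slots (suspend/resume and also freeze/thaw/poweroff/restore); a NULL callback means the PM core skips that step. A hedged sketch of roughly the open-coded equivalent, assuming a kernel context with <linux/pm.h>:

/* approximately what SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL)
 * provides for the suspend/resume slots; the real macro also fills the
 * hibernation callbacks with the same pair */
static const struct dev_pm_ops ap_bus_pm_ops_equiv = {
        .suspend = ap_dev_suspend,      /* new prototype: int (*)(struct device *) */
        .resume  = NULL,                /* nothing to do on resume */
};
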
@@ -24,7 +24,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>

@@ -235,16 +235,6 @@ static struct airq_info *new_airq_info(void)
	return info;
}

static void destroy_airq_info(struct airq_info *info)
{
	if (!info)
		return;

	unregister_adapter_interrupt(&info->airq);
	airq_iv_release(info->aiv);
	kfree(info);
}

static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{

@@ -1294,7 +1284,6 @@ static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};
MODULE_DEVICE_TABLE(ccw, virtio_ids);

static struct ccw_driver virtio_ccw_driver = {
	.driver = {

@@ -1406,14 +1395,4 @@ static int __init virtio_ccw_init(void)
	no_auto_parse();
	return ccw_driver_register(&virtio_ccw_driver);
}
module_init(virtio_ccw_init);

static void __exit virtio_ccw_exit(void)
{
	int i;

	ccw_driver_unregister(&virtio_ccw_driver);
	for (i = 0; i < MAX_AIRQ_AREAS; i++)
		destroy_airq_info(airq_areas[i]);
}
module_exit(virtio_ccw_exit);
device_initcall(virtio_ccw_init);

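Several drivers in this series (tty3215, sclp_tty, sclp_quiesce, cmf, zcore, virtio_ccw) are treated as built-in only, so the module_init/module_exit pairs, MODULE_* tags, and exit paths give no benefit; device_initcall() registers the init function directly and the teardown code can be dropped because built-in code is never unloaded. A hedged sketch of the pattern, assuming a kernel context with <linux/init.h>; the driver name and body are hypothetical:

static int __init example_driver_init(void)
{
        /* hypothetical registration of a built-in-only driver */
        return 0;
}
device_initcall(example_driver_init);
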