Merge tag 's390-5.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Adjust PMU device driver registration to avoid a WARN_ON, plus a
   few other perf improvements.

 - Enhance tracing in vfio-ccw.

 - A few stack unwinder fixes and improvements; convert get_wchan's
   custom stack unwinding to the generic unwind API.

 - Fixes for mm helper issues uncovered by tests validating the
   architecture page table helpers.

 - Fix noexec bit handling when hardware doesn't support it.

 - Fix a memory leak and an unsigned-value-compared-with-zero bug in
   the crypto code. Minor code simplifications.

 - Fix a crash during kdump on kasan-enabled kernels.

 - Switch bug and alternatives from asm to asm_inline to improve
   inlining decisions.

 - Use 'depends on cc-option' for MARCH and TUNE options in Kconfig, add
   z13s and z14 ZR1 to TUNE descriptions.

 - Minor head64.S simplification.

 - Fix the physical-to-logical CPU map for SMT.

 - Several cleanups in qdio code.

 - Other minor cleanups and fixes all over the code.

* tag 's390-5.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (41 commits)
  s390/cpumf: Adjust registration of s390 PMU device drivers
  s390/smp: fix physical to logical CPU map for SMT
  s390/early: move access registers setup in C code
  s390/head64: remove unnecessary vdso_per_cpu_data setup
  s390/early: move control registers setup in C code
  s390/kasan: support memcpy_real with TRACE_IRQFLAGS
  s390/crypto: Fix unsigned variable compared with zero
  s390/pkey: use memdup_user() to simplify code
  s390/pkey: fix memory leak within _copy_apqns_from_user()
  s390/disassembler: don't hide instruction addresses
  s390/cpum_sf: Assign error value to err variable
  s390/cpum_sf: Replace function name in debug statements
  s390/cpum_sf: Use consistant debug print format for sampling
  s390/unwind: drop unnecessary code around calling ftrace_graph_ret_addr()
  s390: add error handling to perf_callchain_kernel
  s390: always inline current_stack_pointer()
  s390/mm: add mm_pxd_folded() checks to pxd_free()
  s390/mm: properly clear _PAGE_NOEXEC bit when it is not supported
  s390/mm: simplify page table helpers for large entries
  s390/mm: make pmd/pud_bad() report large entries as bad
  ...
commit ea1f56fa16
Linus Torvalds, 2019-11-25 17:23:53 -08:00
39 changed files with 410 additions and 372 deletions

@@ -246,8 +246,8 @@ choice

 config MARCH_Z900
 	bool "IBM zSeries model z800 and z900"
-	depends on !CC_IS_CLANG
 	select HAVE_MARCH_Z900_FEATURES
+	depends on $(cc-option,-march=z900)
 	help
 	  Select this to enable optimizations for model z800/z900 (2064 and
 	  2066 series). This will enable some optimizations that are not
@@ -255,8 +255,8 @@ config MARCH_Z900

 config MARCH_Z990
 	bool "IBM zSeries model z890 and z990"
-	depends on !CC_IS_CLANG
 	select HAVE_MARCH_Z990_FEATURES
+	depends on $(cc-option,-march=z990)
 	help
 	  Select this to enable optimizations for model z890/z990 (2084 and
 	  2086 series). The kernel will be slightly faster but will not work
@@ -264,8 +264,8 @@ config MARCH_Z990

 config MARCH_Z9_109
 	bool "IBM System z9"
-	depends on !CC_IS_CLANG
 	select HAVE_MARCH_Z9_109_FEATURES
+	depends on $(cc-option,-march=z9-109)
 	help
 	  Select this to enable optimizations for IBM System z9 (2094 and
 	  2096 series). The kernel will be slightly faster but will not work
@@ -274,6 +274,7 @@ config MARCH_Z9_109
 config MARCH_Z10
 	bool "IBM System z10"
 	select HAVE_MARCH_Z10_FEATURES
+	depends on $(cc-option,-march=z10)
 	help
 	  Select this to enable optimizations for IBM System z10 (2097 and
 	  2098 series). The kernel will be slightly faster but will not work
@@ -282,6 +283,7 @@ config MARCH_Z10
 config MARCH_Z196
 	bool "IBM zEnterprise 114 and 196"
 	select HAVE_MARCH_Z196_FEATURES
+	depends on $(cc-option,-march=z196)
 	help
 	  Select this to enable optimizations for IBM zEnterprise 114 and 196
 	  (2818 and 2817 series). The kernel will be slightly faster but will
@@ -290,6 +292,7 @@ config MARCH_Z196
 config MARCH_ZEC12
 	bool "IBM zBC12 and zEC12"
 	select HAVE_MARCH_ZEC12_FEATURES
+	depends on $(cc-option,-march=zEC12)
 	help
 	  Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
 	  2827 series). The kernel will be slightly faster but will not work on
@@ -298,6 +301,7 @@ config MARCH_ZEC12
 config MARCH_Z13
 	bool "IBM z13s and z13"
 	select HAVE_MARCH_Z13_FEATURES
+	depends on $(cc-option,-march=z13)
 	help
 	  Select this to enable optimizations for IBM z13s and z13 (2965 and
 	  2964 series). The kernel will be slightly faster but will not work on
@@ -306,6 +310,7 @@ config MARCH_Z13
 config MARCH_Z14
 	bool "IBM z14 ZR1 and z14"
 	select HAVE_MARCH_Z14_FEATURES
+	depends on $(cc-option,-march=z14)
 	help
 	  Select this to enable optimizations for IBM z14 ZR1 and z14 (3907
 	  and 3906 series). The kernel will be slightly faster but will not
@@ -314,6 +319,7 @@ config MARCH_Z14
 config MARCH_Z15
 	bool "IBM z15"
 	select HAVE_MARCH_Z15_FEATURES
+	depends on $(cc-option,-march=z15)
 	help
 	  Select this to enable optimizations for IBM z15 (8562
 	  and 8561 series). The kernel will be slightly faster but will not
@@ -367,33 +373,39 @@ config TUNE_DEFAULT

 config TUNE_Z900
 	bool "IBM zSeries model z800 and z900"
-	depends on !CC_IS_CLANG
+	depends on $(cc-option,-mtune=z900)

 config TUNE_Z990
 	bool "IBM zSeries model z890 and z990"
-	depends on !CC_IS_CLANG
+	depends on $(cc-option,-mtune=z990)

 config TUNE_Z9_109
 	bool "IBM System z9"
-	depends on !CC_IS_CLANG
+	depends on $(cc-option,-mtune=z9-109)

 config TUNE_Z10
 	bool "IBM System z10"
+	depends on $(cc-option,-mtune=z10)

 config TUNE_Z196
 	bool "IBM zEnterprise 114 and 196"
+	depends on $(cc-option,-mtune=z196)

 config TUNE_ZEC12
 	bool "IBM zBC12 and zEC12"
+	depends on $(cc-option,-mtune=zEC12)

 config TUNE_Z13
-	bool "IBM z13"
+	bool "IBM z13s and z13"
+	depends on $(cc-option,-mtune=z13)

 config TUNE_Z14
-	bool "IBM z14"
+	bool "IBM z14 ZR1 and z14"
+	depends on $(cc-option,-mtune=z14)

 config TUNE_Z15
 	bool "IBM z15"
+	depends on $(cc-option,-mtune=z15)

 endchoice


@@ -46,7 +46,7 @@ struct diag_ops __bootdata_preserved(diag_dma_ops) = {
 	.diag0c = _diag0c_dma,
 	.diag308_reset = _diag308_reset_dma
 };
-static struct diag210 _diag210_tmp_dma __section(".dma.data");
+static struct diag210 _diag210_tmp_dma __section(.dma.data);
 struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
 void _swsusp_reset_dma(void);
 unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);


@@ -74,14 +74,17 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
 	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
 	u64 bits;
-	unsigned int n, mbl_offset;
+	unsigned int n;
+	int mbl_offset;

 	n = ctx->count % bsize;
 	bits = ctx->count * 8;
-	mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32);
+	mbl_offset = s390_crypto_shash_parmsize(ctx->func);
 	if (mbl_offset < 0)
 		return -EINVAL;

+	mbl_offset = mbl_offset / sizeof(u32);
+
 	/* set total msg bit length (mbl) in CPACF parmblock */
 	switch (ctx->func) {
 	case CPACF_KLMD_SHA_1:
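
The type change above is the whole fix: the parmsize helper returns -EINVAL on error, and storing that into an unsigned variable made the error check dead code. A standalone sketch of the bug class (hypothetical parmsize() stand-in, not the kernel function):

#include <stdio.h>

/* stand-in for a helper that returns a size or a negative errno */
static int parmsize(int known)
{
	return known ? 32 : -22;	/* -EINVAL */
}

int main(void)
{
	unsigned int busted = parmsize(0);	/* -22 wraps to 4294967274 */
	int fixed = parmsize(0);

	if (busted < 0)		/* always false: unsigned, dead check */
		puts("never reached");
	if (fixed < 0)		/* fires as intended with a signed type */
		puts("error detected");
	printf("wrapped size: %u\n", busted);
	return 0;
}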


@@ -139,10 +139,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
  * without volatile and memory clobber.
  */
 #define alternative(oldinstr, altinstr, facility)			\
-	asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+	asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")

 #define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
-	asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1,	\
+	asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
 				   altinstr2, facility2) ::: "memory")

 #endif /* __ASSEMBLY__ */
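
For context: GCC estimates the size of an asm statement from the length of its template string when making inlining decisions, and the ALTERNATIVE() macros expand to long templates that assemble to only a few bytes. asm_inline (GCC 9's "asm inline", with a kernel compatibility define) tells the compiler to cost the statement as minimal size. A minimal sketch, assuming a compiler with asm-inline support:

/* kernel-style sketch; asm_inline comes from <linux/compiler_types.h> */
static inline void tiny_but_verbose(void)
{
	/* one instruction at run time, many characters of template:
	 * asm_inline keeps the inliner's size estimate honest */
	asm_inline volatile(
		"# assembler comments and .pushsection boilerplate\n"
		"# would normally inflate the size estimate\n"
		"	nop\n"
		::: "memory");
}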


@@ -9,7 +9,7 @@
 #ifdef CONFIG_DEBUG_BUGVERBOSE

 #define __EMIT_BUG(x) do {					\
-	asm volatile(						\
+	asm_inline volatile(					\
 		"0:	j	0b+2\n"				\
 		"1:\n"						\
 		".section .rodata.str,\"aMS\",@progbits,1\n"	\
@@ -28,7 +28,7 @@
 #else /* CONFIG_DEBUG_BUGVERBOSE */

 #define __EMIT_BUG(x) do {					\
-	asm volatile(						\
+	asm_inline volatile(					\
 		"0:	j	0b+2\n"				\
 		"1:\n"						\
 		".section __bug_table,\"awM\",@progbits,%1\n"	\


@@ -11,6 +11,7 @@
 #include <linux/bits.h>

 #define CR0_CLOCK_COMPARATOR_SIGN	BIT(63 - 10)
+#define CR0_LOW_ADDRESS_PROTECTION	BIT(63 - 35)
 #define CR0_EMERGENCY_SIGNAL_SUBMASK	BIT(63 - 49)
 #define CR0_EXTERNAL_CALL_SUBMASK	BIT(63 - 50)
 #define CR0_CLOCK_COMPARATOR_SUBMASK	BIT(63 - 52)


@@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
 		crst_table_init(table, _REGION2_ENTRY_EMPTY);
 	return (p4d_t *) table;
 }
-#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
+
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+	if (!mm_p4d_folded(mm))
+		crst_table_free(mm, (unsigned long *) p4d);
+}

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
@@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 		crst_table_init(table, _REGION3_ENTRY_EMPTY);
 	return (pud_t *) table;
 }
-#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	if (!mm_pud_folded(mm))
+		crst_table_free(mm, (unsigned long *) pud);
+}

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
@@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
+	if (mm_pmd_folded(mm))
+		return;
 	pgtable_pmd_page_dtor(virt_to_page(pmd));
 	crst_table_free(mm, (unsigned long *) pmd);
 }
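
The guards matter because s390 folds unused upper page-table levels at run time: for a small address space the p4d/pud "table" handed around is really the top-level table viewed one level down, so an unconditional crst_table_free() would free memory the pgd still owns. A userspace analogy of the ownership rule (assumed simplified types, not the kernel's):

#include <stdbool.h>
#include <stdlib.h>

struct mm { bool p4d_folded; };

/* mirrors the new p4d_free(): only free levels that were really allocated */
static void p4d_free_like(struct mm *mm, void *p4d)
{
	if (!mm->p4d_folded)
		free(p4d);
}

int main(void)
{
	struct mm mm = { .p4d_folded = true };
	void *pgd = malloc(2048);

	p4d_free_like(&mm, pgd);	/* folded: correctly a no-op */
	free(pgd);			/* the single owner frees once */
	return 0;
}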


@@ -266,11 +266,9 @@ static inline int is_module_addr(void *addr)
 #endif

 #define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
-#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
 #define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
 #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
 #define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
@@ -699,10 +697,8 @@ static inline int pmd_large(pmd_t pmd)
 static inline int pmd_bad(pmd_t pmd)
 {
-	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
+	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
 		return 1;
-	if (pmd_large(pmd))
-		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
@@ -710,12 +706,10 @@ static inline int pud_bad(pud_t pud)
 {
 	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

-	if (type > _REGION_ENTRY_TYPE_R3)
+	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
 		return 1;
 	if (type < _REGION_ENTRY_TYPE_R3)
 		return 0;
-	if (pud_large(pud))
-		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
 	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
 }
@@ -758,18 +752,12 @@ static inline int pmd_write(pmd_t pmd)
 static inline int pmd_dirty(pmd_t pmd)
 {
-	int dirty = 1;
-	if (pmd_large(pmd))
-		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
-	return dirty;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
 }

 static inline int pmd_young(pmd_t pmd)
 {
-	int young = 1;
-	if (pmd_large(pmd))
-		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
-	return young;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
 }

 static inline int pte_present(pte_t pte)
@@ -1173,8 +1161,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t entry)
 {
-	if (!MACHINE_HAS_NX)
-		pte_val(entry) &= ~_PAGE_NOEXEC;
 	if (pte_present(entry))
 		pte_val(entry) &= ~_PAGE_UNUSED;
 	if (mm_has_pgste(mm))
@@ -1191,6 +1177,8 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 {
 	pte_t __pte;

 	pte_val(__pte) = physpage + pgprot_val(pgprot);
+	if (!MACHINE_HAS_NX)
+		pte_val(__pte) &= ~_PAGE_NOEXEC;
 	return pte_mkyoung(__pte);
 }
@@ -1297,29 +1285,23 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
 	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
-	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
-		return pmd;
-	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }

 static inline pmd_t pmd_mkclean(pmd_t pmd)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
-		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	}
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }

 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
-				_SEGMENT_ENTRY_SOFT_DIRTY;
-		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
-			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
-	}
+	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
+	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
@@ -1333,29 +1315,23 @@ static inline pud_t pud_wrprotect(pud_t pud)
 static inline pud_t pud_mkwrite(pud_t pud)
 {
 	pud_val(pud) |= _REGION3_ENTRY_WRITE;
-	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
-		return pud;
-	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
+		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
 	return pud;
 }

 static inline pud_t pud_mkclean(pud_t pud)
 {
-	if (pud_large(pud)) {
-		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
-		pud_val(pud) |= _REGION_ENTRY_PROTECT;
-	}
+	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
+	pud_val(pud) |= _REGION_ENTRY_PROTECT;
 	return pud;
 }

 static inline pud_t pud_mkdirty(pud_t pud)
 {
-	if (pud_large(pud)) {
-		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
-				_REGION3_ENTRY_SOFT_DIRTY;
-		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
-			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
-	}
+	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
+	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
+		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
 	return pud;
 }
@@ -1379,38 +1355,29 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
-		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
-			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
-	}
+	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
 	return pmd;
 }

 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
-		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
-	}
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
 	return pmd;
 }

 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	if (pmd_large(pmd)) {
-		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
-			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
-			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
-		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
-		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
-			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
-			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
-		return pmd;
-	}
-	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
+	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
+		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
+		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
+		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
 	return pmd;
 }
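
One note on the _PAGE_NOEXEC hunks: the clearing moves from set_pte_at() to mk_pte_phys(), so the bit is stripped once, where the pte value is constructed, and no path that builds ptes without going through set_pte_at() can install a bit that hardware without facility 130 cannot interpret. The pattern in isolation, with invented bit names (not the real s390 layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MY_PAGE_NOEXEC (1ull << 8)	/* hypothetical software bit */

static bool machine_has_nx;		/* facility detection result */

/* clear unsupported bits where the value is born, not in the setters */
static uint64_t mk_pte_like(uint64_t physpage, uint64_t pgprot)
{
	uint64_t pte = physpage + pgprot;

	if (!machine_has_nx)
		pte &= ~MY_PAGE_NOEXEC;
	return pte;
}

int main(void)
{
	machine_has_nx = false;
	printf("%#llx\n",
	       (unsigned long long)mk_pte_like(0x1000, MY_PAGE_NOEXEC));
	return 0;	/* prints 0x1000: the bit never escapes */
}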


@@ -206,7 +206,7 @@ unsigned long get_wchan(struct task_struct *p);
 /* Has task runtime instrumentation enabled ? */
 #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)

-static inline unsigned long current_stack_pointer(void)
+static __always_inline unsigned long current_stack_pointer(void)
 {
 	unsigned long sp;


@@ -276,6 +276,7 @@ struct qdio_outbuf_state {
 #define CHSC_AC2_MULTI_BUFFER_AVAILABLE	0x0080
 #define CHSC_AC2_MULTI_BUFFER_ENABLED	0x0040
 #define CHSC_AC2_DATA_DIV_AVAILABLE	0x0010
+#define CHSC_AC2_SNIFFER_AVAILABLE	0x0008
 #define CHSC_AC2_DATA_DIV_ENABLED	0x0002

 #define CHSC_AC3_FORMAT2_CQ_AVAILABLE	0x8000


@@ -85,7 +85,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
 	typecheck(int, lp->lock);
-	asm volatile(
+	asm_inline volatile(
 		ALTERNATIVE("", ".long 0xb2fa0070", 49)	/* NIAI 7 */
 		"	sth	%1,%0\n"
 		: "=Q" (((unsigned short *) &lp->lock)[1])


@@ -38,7 +38,7 @@ static inline unsigned long get_stack_pointer(struct task_struct *task,
 {
 	if (regs)
 		return (unsigned long) kernel_stack_pointer(regs);
-	if (task == current)
+	if (!task || task == current)
 		return current_stack_pointer();
 	return (unsigned long) task->thread.ksp;
 }


@@ -10,8 +10,9 @@
 #ifndef _ASM_S390_TIMEX_H
 #define _ASM_S390_TIMEX_H

-#include <asm/lowcore.h>
+#include <linux/preempt.h>
 #include <linux/time64.h>
+#include <asm/lowcore.h>

 /* The value of the TOD clock for 1.1.1970. */
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -179,22 +180,24 @@ static inline cycles_t get_cycles(void)
 int get_phys_clock(unsigned long *clock);
 void init_cpu_timer(void);
-unsigned long long monotonic_clock(void);

 extern unsigned char tod_clock_base[16] __aligned(8);

 /**
  * get_clock_monotonic - returns current time in clock rate units
  *
- * The caller must ensure that preemption is disabled.
  * The clock and tod_clock_base get changed via stop_machine.
- * Therefore preemption must be disabled when calling this
- * function, otherwise the returned value is not guaranteed to
- * be monotonic.
+ * Therefore preemption must be disabled, otherwise the returned
+ * value is not guaranteed to be monotonic.
  */
 static inline unsigned long long get_tod_clock_monotonic(void)
 {
-	return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+	unsigned long long tod;
+
+	preempt_disable();
+	tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+	preempt_enable();
+	return tod;
 }

 /**
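
tod_clock_base is rewritten under stop_machine() when the clock is steered, so a reader preempted between fetching the base and reading the TOD clock could combine old and new values. Wrapping the read in preempt_disable()/preempt_enable() makes the helper safe from preemptible context; callers need no bracket of their own. Sketch of a now-valid caller (assumed kernel context, <asm/timex.h>):

static unsigned long long tod_delta_sample(void)
{
	unsigned long long t0, t1;

	t0 = get_tod_clock_monotonic();	/* disables preemption internally */
	t1 = get_tod_clock_monotonic();
	return t1 - t0;			/* monotonic: never underflows */
}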


@@ -461,10 +461,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
 			ptr += sprintf(ptr, "%%c%i", value);
 		else if (operand->flags & OPERAND_VR)
 			ptr += sprintf(ptr, "%%v%i", value);
-		else if (operand->flags & OPERAND_PCREL)
-			ptr += sprintf(ptr, "%lx", (signed int) value
-				       + addr);
-		else if (operand->flags & OPERAND_SIGNED)
+		else if (operand->flags & OPERAND_PCREL) {
+			void *pcrel = (void *)((int)value + addr);
+
+			ptr += sprintf(ptr, "%px", pcrel);
+		} else if (operand->flags & OPERAND_SIGNED)
 			ptr += sprintf(ptr, "%i", value);
 		else
 			ptr += sprintf(ptr, "%u", value);
@@ -536,7 +537,7 @@ void show_code(struct pt_regs *regs)
 		else
 			*ptr++ = ' ';
 		addr = regs->psw.addr + start - 32;
-		ptr += sprintf(ptr, "%016lx: ", addr);
+		ptr += sprintf(ptr, "%px: ", (void *)addr);
 		if (start + opsize >= end)
 			break;
 		for (i = 0; i < opsize; i++)
@@ -564,7 +565,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
 		opsize = insn_length(*code);
 		if (opsize > len)
 			break;
-		ptr += sprintf(ptr, "%p: ", code);
+		ptr += sprintf(ptr, "%px: ", code);
 		for (i = 0; i < opsize; i++)
 			ptr += sprintf(ptr, "%02x", code[i]);
 		*ptr++ = '\t';
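
Background on the format-specifier change ("don't hide instruction addresses"): printk's %p hashes pointer values to avoid leaking kernel addresses, which made disassembly output useless for debugging; %px is the documented opt-out that prints the raw value. Quick illustration (assumed kernel context, <linux/printk.h>):

static void print_both(void *addr)
{
	pr_info("%p\n", addr);	/* hashed, stable per boot, not a real address */
	pr_info("%px\n", addr);	/* raw value; use only where exposure is OK */
}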


@@ -30,6 +30,7 @@
 #include <asm/sclp.h>
 #include <asm/facility.h>
 #include <asm/boot_data.h>
+#include <asm/switch_to.h>
 #include "entry.h"

 static void __init reset_tod_clock(void)
@@ -238,7 +239,7 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
 		__ctl_set_bit(0, 17);
 	}
-	if (test_facility(130)) {
+	if (test_facility(130) && !noexec_disabled) {
 		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
 		__ctl_set_bit(0, 20);
 	}
@@ -260,6 +261,24 @@ static inline void save_vector_registers(void)
 #endif
 }

+static inline void setup_control_registers(void)
+{
+	unsigned long reg;
+
+	__ctl_store(reg, 0, 0);
+	reg |= CR0_LOW_ADDRESS_PROTECTION;
+	reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
+	reg |= CR0_EXTERNAL_CALL_SUBMASK;
+	__ctl_load(reg, 0, 0);
+}
+
+static inline void setup_access_registers(void)
+{
+	unsigned int acrs[NUM_ACRS] = { 0 };
+
+	restore_access_regs(acrs);
+}
+
 static int __init disable_vector_extension(char *str)
 {
 	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
@@ -268,21 +287,6 @@ static int __init disable_vector_extension(char *str)
 }
 early_param("novx", disable_vector_extension);

-static int __init noexec_setup(char *str)
-{
-	bool enabled;
-	int rc;
-
-	rc = kstrtobool(str, &enabled);
-	if (!rc && !enabled) {
-		/* Disable no-execute support */
-		S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
-		__ctl_clear_bit(0, 20);
-	}
-	return rc;
-}
-early_param("noexec", noexec_setup);
-
 static int __init cad_setup(char *str)
 {
 	bool enabled;
@@ -332,5 +336,7 @@ void __init startup_init(void)
 	save_vector_registers();
 	setup_topology();
 	sclp_early_detect();
+	setup_control_registers();
+	setup_access_registers();
 	lockdep_on();
 }


@@ -26,8 +26,6 @@ ENTRY(startup_continue)
 0:	larl	%r1,tod_clock_base
 	mvc	0(16,%r1),__LC_BOOT_CLOCK
 	larl	%r13,.LPG1		# get base
-	larl	%r0,boot_vdso_data
-	stg	%r0,__LC_VDSO_PER_CPU
 #
 # Setup stack
 #
@@ -37,19 +35,8 @@ ENTRY(startup_continue)
 #ifdef CONFIG_KASAN
 	brasl	%r14,kasan_early_init
 #endif
-#
-# Early machine initialization and detection functions.
-#
-	brasl	%r14,startup_init
-
-# check control registers
-	stctg	%c0,%c15,0(%r15)
-	oi	6(%r15),0x60		# enable sigp emergency & external call
-	oi	4(%r15),0x10		# switch on low address proctection
-	lctlg	%c0,%c15,0(%r15)
-
-	lam	0,15,.Laregs-.LPG1(%r13)	# load acrs needed by uaccess
-	brasl	%r14,start_kernel	# go to C code
+	brasl	%r14,startup_init	# s390 specific early init
+	brasl	%r14,start_kernel	# common init code
 #
 # We returned from start_kernel ?!? PANIK
 #
@@ -59,4 +46,3 @@ ENTRY(startup_continue)
 	.align	16
 .LPG1:
 .Ldw:	.quad	0x0002000180000000,0x0000000000000000
-.Laregs:.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0


@@ -199,7 +199,7 @@ static const int cpumf_generic_events_user[] = {
 	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
 };

-static int __hw_perf_event_init(struct perf_event *event)
+static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
 {
 	struct perf_event_attr *attr = &event->attr;
 	struct hw_perf_event *hwc = &event->hw;
@@ -207,7 +207,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	int err = 0;
 	u64 ev;

-	switch (attr->type) {
+	switch (type) {
 	case PERF_TYPE_RAW:
 		/* Raw events are used to access counters directly,
 		 * hence do not permit excludes */
@@ -294,17 +294,16 @@ static int __hw_perf_event_init(struct perf_event *event)
 static int cpumf_pmu_event_init(struct perf_event *event)
 {
+	unsigned int type = event->attr.type;
 	int err;

-	switch (event->attr.type) {
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-	case PERF_TYPE_RAW:
-		err = __hw_perf_event_init(event);
-		break;
-	default:
+	if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
+		err = __hw_perf_event_init(event, type);
+	else if (event->pmu->type == type)
+		/* Registered as unknown PMU */
+		err = __hw_perf_event_init(event, PERF_TYPE_RAW);
+	else
 		return -ENOENT;
-	}

 	if (unlikely(err) && event->destroy)
 		event->destroy(event);
@@ -553,7 +552,7 @@ static int __init cpumf_pmu_init(void)
 		return -ENODEV;

 	cpumf_pmu.attr_groups = cpumf_cf_event_group();
-	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
+	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
 	if (rc)
 		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
 	return rc;
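
The registration change is the core of the WARN_ON fix: several s390 PMUs registered with the fixed type PERF_TYPE_RAW, which newer perf cores warn about when more than one PMU claims the same type. Passing -1 asks perf_pmu_register() to allocate a dynamic type id (stored in pmu->type and exposed in sysfs), and event_init then has to accept events arriving with that dynamic type, exactly as the hunk above does. A minimal sketch of the convention (hypothetical PMU, most callbacks elided):

/* assumes <linux/perf_event.h> */
static struct pmu my_pmu;	/* .add, .del, .start, .stop, ... go here */

static int my_event_init(struct perf_event *event)
{
	/* dynamic registration: match the type perf assigned to us */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	return 0;
}

static int __init my_pmu_setup(void)
{
	my_pmu.event_init = my_event_init;
	/* -1: let perf allocate a dynamic type instead of PERF_TYPE_RAW */
	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}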


@@ -243,13 +243,13 @@ static int cf_diag_event_init(struct perf_event *event)
 	int err = -ENOENT;

 	debug_sprintf_event(cf_diag_dbg, 5,
-			    "%s event %p cpu %d config %#llx "
+			    "%s event %p cpu %d config %#llx type:%u "
 			    "sample_type %#llx cf_diag_events %d\n", __func__,
-			    event, event->cpu, attr->config, attr->sample_type,
-			    atomic_read(&cf_diag_events));
+			    event, event->cpu, attr->config, event->pmu->type,
+			    attr->sample_type, atomic_read(&cf_diag_events));

 	if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
-	    event->attr.type != PERF_TYPE_RAW)
+	    event->attr.type != event->pmu->type)
 		goto out;

 	/* Raw events are used to access counters directly,
@@ -693,7 +693,7 @@ static int __init cf_diag_init(void)
 	}
 	debug_register_view(cf_diag_dbg, &debug_sprintf_view);

-	rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW);
+	rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
 	if (rc) {
 		debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
 		debug_unregister(cf_diag_dbg);


@@ -156,8 +156,8 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
 		}
 	}

-	debug_sprintf_event(sfdbg, 5,
-			    "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt);
+	debug_sprintf_event(sfdbg, 5, "%s freed sdbt %p\n", __func__,
+			    sfb->sdbt);
 	memset(sfb, 0, sizeof(*sfb));
 }
@@ -212,10 +212,10 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
 	 * the sampling buffer origin.
 	 */
 	if (sfb->sdbt != get_next_sdbt(tail)) {
-		debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
-				    "sampling buffer is not linked: origin=%p"
-				    "tail=%p\n",
-				    (void *) sfb->sdbt, (void *) tail);
+		debug_sprintf_event(sfdbg, 3, "%s: "
+				    "sampling buffer is not linked: origin %p"
+				    " tail %p\n", __func__,
+				    (void *)sfb->sdbt, (void *)tail);
 		return -EINVAL;
 	}
@@ -252,7 +252,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
 	sfb->tail = tail;
 	debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
-			    " settings: sdbt=%lu sdb=%lu\n",
+			    " settings: sdbt %lu sdb %lu\n",
 			    sfb->num_sdbt, sfb->num_sdb);
 	return rc;
 }
@@ -293,11 +293,11 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
 	if (rc) {
 		free_sampling_buffer(sfb);
 		debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
-			"realloc_sampling_buffer failed with rc=%i\n", rc);
+			"realloc_sampling_buffer failed with rc %i\n", rc);
 	} else
 		debug_sprintf_event(sfdbg, 4,
-			"alloc_sampling_buffer: tear=%p dear=%p\n",
-			sfb->sdbt, (void *) *sfb->sdbt);
+			"alloc_sampling_buffer: tear %p dear %p\n",
+			sfb->sdbt, (void *)*sfb->sdbt);
 	return rc;
 }
@@ -404,8 +404,8 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
 		return 0;

 	debug_sprintf_event(sfdbg, 3,
-			    "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu"
-			    " sample_size=%lu cpuhw=%p\n",
+			    "%s: rate %lu f %lu sdb %lu/%lu"
+			    " sample_size %lu cpuhw %p\n", __func__,
 			    SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
 			    sample_size, cpuhw);
@@ -465,8 +465,8 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
 	if (num)
 		sfb_account_allocs(num, hwc);

-	debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
-			    " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
+	debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow %llu ratio %lu"
+			    " num %lu\n", OVERFLOW_REG(hwc), ratio, num);
 	OVERFLOW_REG(hwc) = 0;
 }
@@ -505,11 +505,11 @@ static void extend_sampling_buffer(struct sf_buffer *sfb,
 	rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
 	if (rc)
 		debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
-				    "failed with rc=%i\n", rc);
+				    "failed with rc %i\n", rc);

 	if (sfb_has_pending_allocs(sfb, hwc))
 		debug_sprintf_event(sfdbg, 5, "sfb: extend: "
-				    "req=%lu alloc=%lu remaining=%lu\n",
+				    "req %lu alloc %lu remaining %lu\n",
 				    num, sfb->num_sdb - num_old,
 				    sfb_pending_allocs(sfb, hwc));
 }
@@ -538,20 +538,22 @@ static void setup_pmc_cpu(void *flags)
 		err = sf_disable();
 		if (err)
 			pr_err("Switching off the sampling facility failed "
-			       "with rc=%i\n", err);
+			       "with rc %i\n", err);
 		debug_sprintf_event(sfdbg, 5,
-				    "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
+				    "%s: initialized: cpuhw %p\n", __func__,
+				    cpusf);
 		break;
 	case PMC_RELEASE:
 		cpusf->flags &= ~PMU_F_RESERVED;
 		err = sf_disable();
 		if (err) {
 			pr_err("Switching off the sampling facility failed "
-			       "with rc=%i\n", err);
+			       "with rc %i\n", err);
 		} else
 			deallocate_buffers(cpusf);
 		debug_sprintf_event(sfdbg, 5,
-				    "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
+				    "%s: released: cpuhw %p\n", __func__,
+				    cpusf);
 		break;
 	}
 	if (err)
@@ -744,7 +746,7 @@ static int __hw_perf_event_init_rate(struct perf_event *event,
 	SAMPL_RATE(hwc) = rate;
 	hw_init_period(hwc, SAMPL_RATE(hwc));
 	debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:"
-			    "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu,
+			    "cpu:%d period:%#llx freq:%d,%#lx\n", event->cpu,
 			    event->attr.sample_period, event->attr.freq,
 			    SAMPLE_FREQ_MODE(hwc));
 	return 0;
@@ -963,7 +965,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 	err = lsctl(&cpuhw->lsctl);
 	if (err) {
 		cpuhw->flags &= ~PMU_F_ENABLED;
-		pr_err("Loading sampling controls failed: op=%i err=%i\n",
+		pr_err("Loading sampling controls failed: op %i err %i\n",
 		       1, err);
 		return;
 	}
@@ -971,8 +973,8 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 	/* Load current program parameter */
 	lpp(&S390_lowcore.lpp);

-	debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
-			    "interval:%lx tear=%p dear=%p\n",
+	debug_sprintf_event(sfdbg, 6, "pmu_enable: es %i cs %i ed %i cd %i "
+			    "interval %#lx tear %p dear %p\n",
 			    cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
 			    cpuhw->lsctl.cd, cpuhw->lsctl.interval,
 			    (void *) cpuhw->lsctl.tear,
@@ -999,13 +1001,14 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
 	err = lsctl(&inactive);
 	if (err) {
-		pr_err("Loading sampling controls failed: op=%i err=%i\n",
+		pr_err("Loading sampling controls failed: op %i err %i\n",
 		       2, err);
 		return;
 	}

 	/* Save state of TEAR and DEAR register contents */
-	if (!qsi(&si)) {
+	err = qsi(&si);
+	if (!err) {
 		/* TEAR/DEAR values are valid only if the sampling facility is
 		 * enabled. Note that cpumsf_pmu_disable() might be called even
 		 * for a disabled sampling facility because cpumsf_pmu_enable()
@@ -1017,7 +1020,7 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
 		}
 	} else
 		debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
-				    "qsi() failed with err=%i\n", err);
+				    "qsi() failed with err %i\n", err);

 	cpuhw->flags &= ~PMU_F_ENABLED;
 }
@@ -1130,15 +1133,6 @@ static void perf_event_count_update(struct perf_event *event, u64 count)
 	local64_add(count, &event->count);
 }

-static void debug_sample_entry(struct hws_basic_entry *sample,
-			       struct hws_trailer_entry *te)
-{
-	debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
-			    "sampling data entry: te->f=%i basic.def=%04x "
-			    "(%p)\n",
-			    te->f, sample->def, sample);
-}
-
 /* hw_collect_samples() - Walk through a sample-data-block and collect samples
  * @event:	The perf event
  * @sdbt:	Sample-data-block table
@@ -1192,7 +1186,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
 			/* Count discarded samples */
 			*overflow += 1;
 		} else {
-			debug_sample_entry(sample, te);
+			debug_sprintf_event(sfdbg, 4,
+					    "%s: Found unknown"
+					    " sampling data entry: te->f %i"
+					    " basic.def %#4x (%p)\n", __func__,
+					    te->f, sample->def, sample);
 			/* Sample slot is not yet written or other record.
 			 *
 			 * This condition can occur if the buffer was reused
@@ -1267,9 +1265,9 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
 			sampl_overflow += te->overflow;

 		/* Timestamps are valid for full sample-data-blocks only */
-		debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p "
-				    "overflow=%llu timestamp=%#llx\n",
-				    sdbt, te->overflow,
+		debug_sprintf_event(sfdbg, 6, "%s: sdbt %p "
+				    "overflow %llu timestamp %#llx\n",
+				    __func__, sdbt, te->overflow,
 				    (te->f) ? trailer_timestamp(te) : 0ULL);

 		/* Collect all samples from a single sample-data-block and
@@ -1313,9 +1311,9 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
 		OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
 						 sampl_overflow, 1 + num_sdb);
 	if (sampl_overflow || event_overflow)
-		debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
-				    "overflow stats: sample=%llu event=%llu\n",
-				    sampl_overflow, event_overflow);
+		debug_sprintf_event(sfdbg, 4, "%s: "
+				    "overflow stats: sample %llu event %llu\n",
+				    __func__, sampl_overflow, event_overflow);
 }

 #define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
@@ -1368,7 +1366,7 @@ static void aux_output_end(struct perf_output_handle *handle)
 	te = aux_sdb_trailer(aux, aux->alert_mark);
 	te->flags &= ~SDB_TE_ALERT_REQ_MASK;

-	debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i);
+	debug_sprintf_event(sfdbg, 6, "%s: collect %#lx SDBs\n", __func__, i);
 }

 /*
@@ -1428,8 +1426,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
 	debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
 			    "head->alert_mark->empty_mark (num_alert, range)"
-			    "[%lx -> %lx -> %lx] (%lx, %lx) "
-			    "tear index %lx, tear %lx dear %lx\n",
+			    "[%#lx -> %#lx -> %#lx] (%#lx, %#lx) "
+			    "tear index %#lx, tear %#lx dear %#lx\n",
 			    aux->head, aux->alert_mark, aux->empty_mark,
 			    AUX_SDB_NUM_ALERT(aux), range,
 			    head / CPUM_SF_SDB_PER_TABLE,
@@ -1596,13 +1594,13 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
 			perf_aux_output_end(&cpuhw->handle, size);
 			pr_err("Sample data caused the AUX buffer with %lu "
 			       "pages to overflow\n", num_sdb);
-			debug_sprintf_event(sfdbg, 1, "head %lx range %lx "
-					    "overflow %llx\n",
+			debug_sprintf_event(sfdbg, 1, "head %#lx range %#lx "
+					    "overflow %#llx\n",
 					    aux->head, range, overflow);
 		} else {
 			size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
 			perf_aux_output_end(&cpuhw->handle, size);
-			debug_sprintf_event(sfdbg, 6, "head %lx alert %lx "
+			debug_sprintf_event(sfdbg, 6, "head %#lx alert %#lx "
 					    "already full, try another\n",
 					    aux->head, aux->alert_mark);
 		}
@@ -1610,7 +1608,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
 	if (done)
 		debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
-				    "[%lx -> %lx -> %lx] (%lx, %lx)\n",
+				    "[%#lx -> %#lx -> %#lx] (%#lx, %#lx)\n",
 				    aux->head, aux->alert_mark, aux->empty_mark,
 				    AUX_SDB_NUM_ALERT(aux), range);
 }
@@ -1800,7 +1798,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
 	SAMPL_RATE(&event->hw) = rate;
 	hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
 	debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:"
-			    "cpu:%d value:%llx period:%llx freq:%d\n",
+			    "cpu:%d value:%#llx period:%#llx freq:%d\n",
 			    event->cpu, value,
 			    event->attr.sample_period, do_freq);
 	return 0;
@@ -2111,7 +2109,7 @@ static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
 	sfb_set_limits(min, max);
 	pr_info("The sampling buffer limits have changed to: "
-		"min=%lu max=%lu (diag=x%lu)\n",
+		"min %lu max %lu (diag %lu)\n",
 		CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
 	return 0;
 }
@@ -2129,7 +2127,7 @@ static const struct kernel_param_ops param_ops_sfb_size = {
 static void __init pr_cpumsf_err(unsigned int reason)
 {
 	pr_err("Sampling facility support for perf is not available: "
-	       "reason=%04x\n", reason);
+	       "reason %#x\n", reason);
 }

 static int __init init_cpum_sampling_pmu(void)


@@ -224,9 +224,13 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	struct unwind_state state;
+	unsigned long addr;

-	unwind_for_each_frame(&state, current, regs, 0)
-		perf_callchain_store(entry, state.ip);
+	unwind_for_each_frame(&state, current, regs, 0) {
+		addr = unwind_get_return_address(&state);
+		if (!addr || perf_callchain_store(entry, addr))
+			return;
+	}
 }

 /* Perf definitions for PMU event attributes in sysfs */


@@ -40,6 +40,7 @@
 #include <asm/stacktrace.h>
 #include <asm/switch_to.h>
 #include <asm/runtime_instr.h>
+#include <asm/unwind.h>
 #include "entry.h"

 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -178,9 +179,8 @@ EXPORT_SYMBOL(dump_fpu);
 unsigned long get_wchan(struct task_struct *p)
 {
-	struct stack_frame *sf, *low, *high;
-	unsigned long return_address;
-	int count;
+	struct unwind_state state;
+	unsigned long ip = 0;

 	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
 		return 0;
@@ -188,26 +188,22 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!try_get_task_stack(p))
 		return 0;

-	low = task_stack_page(p);
-	high = (struct stack_frame *) task_pt_regs(p);
-	sf = (struct stack_frame *) p->thread.ksp;
-	if (sf <= low || sf > high) {
-		return_address = 0;
-		goto out;
-	}
-	for (count = 0; count < 16; count++) {
-		sf = (struct stack_frame *)READ_ONCE_NOCHECK(sf->back_chain);
-		if (sf <= low || sf > high) {
-			return_address = 0;
-			goto out;
+	unwind_for_each_frame(&state, p, NULL, 0) {
+		if (state.stack_info.type != STACK_TYPE_TASK) {
+			ip = 0;
+			break;
 		}
-		return_address = READ_ONCE_NOCHECK(sf->gprs[8]);
-		if (!in_sched_functions(return_address))
-			goto out;
+
+		ip = unwind_get_return_address(&state);
+		if (!ip)
+			break;
+
+		if (!in_sched_functions(ip))
+			break;
 	}
-out:
+
 	put_task_stack(p);
-	return return_address;
+	return ip;
 }

 unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_align_stack(unsigned long sp) unsigned long arch_align_stack(unsigned long sp)


@@ -724,39 +724,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
 static int smp_add_present_cpu(int cpu);

-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
+static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
+			bool configured, bool early)
 {
 	struct pcpu *pcpu;
-	cpumask_t avail;
-	int cpu, nr, i, j;
+	int cpu, nr, i;
 	u16 address;

 	nr = 0;
-	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
-	cpu = cpumask_first(&avail);
-	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
-		if (sclp.has_core_type && info->core[i].type != boot_core_type)
+	if (sclp.has_core_type && core->type != boot_core_type)
+		return nr;
+	cpu = cpumask_first(avail);
+	address = core->core_id << smp_cpu_mt_shift;
+	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
+		if (pcpu_find_address(cpu_present_mask, address + i))
 			continue;
-		address = info->core[i].core_id << smp_cpu_mt_shift;
-		for (j = 0; j <= smp_cpu_mtid; j++) {
-			if (pcpu_find_address(cpu_present_mask, address + j))
-				continue;
-			pcpu = pcpu_devices + cpu;
-			pcpu->address = address + j;
-			pcpu->state =
-				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
-				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
-			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
-			set_cpu_present(cpu, true);
-			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
-				set_cpu_present(cpu, false);
-			else
-				nr++;
-			cpu = cpumask_next(cpu, &avail);
-			if (cpu >= nr_cpu_ids)
+		pcpu = pcpu_devices + cpu;
+		pcpu->address = address + i;
+		if (configured)
+			pcpu->state = CPU_STATE_CONFIGURED;
+		else
+			pcpu->state = CPU_STATE_STANDBY;
+		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+		set_cpu_present(cpu, true);
+		if (!early && smp_add_present_cpu(cpu) != 0)
+			set_cpu_present(cpu, false);
+		else
+			nr++;
+		cpumask_clear_cpu(cpu, avail);
+		cpu = cpumask_next(cpu, avail);
+	}
+	return nr;
+}
+
+static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
+{
+	struct sclp_core_entry *core;
+	cpumask_t avail;
+	bool configured;
+	u16 core_id;
+	int nr, i;
+
+	nr = 0;
+	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
+	/*
+	 * Add IPL core first (which got logical CPU number 0) to make sure
+	 * that all SMT threads get subsequent logical CPU numbers.
+	 */
+	if (early) {
+		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
+		for (i = 0; i < info->configured; i++) {
+			core = &info->core[i];
+			if (core->core_id == core_id) {
+				nr += smp_add_core(core, &avail, true, early);
 				break;
+			}
 		}
 	}
+	for (i = 0; i < info->combined; i++) {
+		configured = i < info->configured;
+		nr += smp_add_core(&info->core[i], &avail, configured, early);
+	}
 	return nr;
 }
@@ -805,7 +833,7 @@ void __init smp_detect_cpus(void)
 	/* Add CPUs present at boot */
 	get_online_cpus();
-	__smp_rescan_cpus(info, 0);
+	__smp_rescan_cpus(info, true);
 	put_online_cpus();
 	memblock_free_early((unsigned long)info, sizeof(*info));
 }
@@ -1148,7 +1176,7 @@ int __ref smp_rescan_cpus(void)
 	smp_get_core_info(info, 0);
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
-	nr = __smp_rescan_cpus(info, 1);
+	nr = __smp_rescan_cpus(info, false);
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
 	kfree(info);


@@ -110,15 +110,6 @@ unsigned long long notrace sched_clock(void)
 }
 NOKPROBE_SYMBOL(sched_clock);

-/*
- * Monotonic_clock - returns # of nanoseconds passed since time_init()
- */
-unsigned long long monotonic_clock(void)
-{
-	return sched_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
-
 static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
 {
 	unsigned long long high, low, rem, sec, nsec;


@@ -85,12 +85,7 @@ bool unwind_next_frame(struct unwind_state *state)
 		}
 	}

-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* Decode any ftrace redirection */
-	if (ip == (unsigned long) return_to_handler)
-		ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-					   ip, (void *) sp);
-#endif
+	ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp);

 	/* Update unwind state */
 	state->sp = sp;
@@ -147,12 +142,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 		reuse_sp = false;
 	}

-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* Decode any ftrace redirection */
-	if (ip == (unsigned long) return_to_handler)
-		ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-					   ip, NULL);
-#endif
+	ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);

 	/* Update unwind state */
 	state->sp = sp;
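
Dropping the #ifdef and the return_to_handler check is safe because ftrace_graph_ret_addr() already covers both cases: with the graph tracer compiled in it only translates addresses that actually point at return_to_handler, and with it compiled out it is a stub that hands the address back unchanged. Paraphrased from the !CONFIG_FUNCTION_GRAPH_TRACER side of include/linux/ftrace.h:

/* stub used when the function graph tracer is compiled out */
static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx,
		      unsigned long ret, unsigned long *retp)
{
	return ret;	/* nothing to decode, hand the address back */
}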


@@ -74,7 +74,7 @@ static inline int arch_load_niai4(int *lock)
 {
 	int owner;

-	asm volatile(
+	asm_inline volatile(
 		ALTERNATIVE("", ".long 0xb2fa0040", 49)	/* NIAI 4 */
 		"	l	%0,%1\n"
 		: "=d" (owner) : "Q" (*lock) : "memory");
@@ -85,7 +85,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
 {
 	int expected = old;

-	asm volatile(
+	asm_inline volatile(
 		ALTERNATIVE("", ".long 0xb2fa0080", 49)	/* NIAI 8 */
 		"	cs	%0,%3,%1\n"
 		: "=d" (old), "=Q" (*lock)


@@ -70,7 +70,7 @@ void notrace s390_kernel_write(void *dst, const void *src, size_t size)
 	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
 }
 
-static int __memcpy_real(void *dest, void *src, size_t count)
+static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
 {
 	register unsigned long _dest asm("2") = (unsigned long) dest;
 	register unsigned long _len1 asm("3") = (unsigned long) count;
@@ -91,19 +91,23 @@ static int __memcpy_real(void *dest, void *src, size_t count)
 	return rc;
 }
 
-static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
-				  unsigned long count)
+static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
+							unsigned long src,
+							unsigned long count)
 {
 	int irqs_disabled, rc;
 	unsigned long flags;
 
 	if (!count)
 		return 0;
-	flags = __arch_local_irq_stnsm(0xf8UL);
+	flags = arch_local_irq_save();
 	irqs_disabled = arch_irqs_disabled_flags(flags);
 	if (!irqs_disabled)
 		trace_hardirqs_off();
+	__arch_local_irq_stnsm(0xf8);	// disable DAT
 	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
+	if (flags & PSW_MASK_DAT)
+		__arch_local_irq_stosm(0x04);	// enable DAT
 	if (!irqs_disabled)
 		trace_hardirqs_on();
 	__arch_local_irq_ssm(flags);
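The reordering is the substance of the kasan fix: trace_hardirqs_off()/on() are instrumented, and KASAN's shadow-memory checks need DAT, so the window without DAT now covers only the copy itself, with both copy helpers opted out of instrumentation via __no_sanitize_address. The same sequence, annotated:

	flags = arch_local_irq_save();		/* IRQs off, DAT untouched */
	if (!irqs_disabled)
		trace_hardirqs_off();		/* instrumented: DAT still on */
	__arch_local_irq_stnsm(0xf8);		/* DAT off only for the copy */
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04);	/* restore DAT if it was on */
	if (!irqs_disabled)
		trace_hardirqs_on();		/* instrumented: DAT on again */
	__arch_local_irq_ssm(flags);		/* restore full system mask */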

diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile

@@ -5,7 +5,7 @@
 # The following is required for define_trace.h to find ./trace.h
 CFLAGS_trace.o := -I$(src)
-CFLAGS_vfio_ccw_fsm.o := -I$(src)
+CFLAGS_vfio_ccw_trace.o := -I$(src)
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
 	fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
@@ -21,5 +21,5 @@ qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
 obj-$(CONFIG_QDIO) += qdio.o
 
 vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
-	vfio_ccw_async.o
+	vfio_ccw_async.o vfio_ccw_trace.o
 obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o

diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h

@@ -252,9 +252,6 @@ struct qdio_q {
 	/* input or output queue */
 	int is_input_q;
 
-	/* list of thinint input queues */
-	struct list_head entry;
-
 	/* upper-layer program handler */
 	qdio_handler_t (*handler);
 
@@ -272,6 +269,7 @@ struct qdio_irq {
 	struct qib qib;
 	u32 *dsci;		/* address of device state change indicator */
 	struct ccw_device *cdev;
+	struct list_head entry;		/* list of thinint devices */
 	struct dentry *debugfs_dev;
 	struct dentry *debugfs_perf;
 
@@ -317,13 +315,15 @@ struct qdio_irq {
 #define qperf(__qdev, __attr)	((__qdev)->perf_stat.(__attr))
 
-#define qperf_inc(__q, __attr)					\
+#define QDIO_PERF_STAT_INC(__irq, __attr)			\
 ({								\
-	struct qdio_irq *qdev = (__q)->irq_ptr;			\
+	struct qdio_irq *qdev = __irq;				\
 	if (qdev->perf_stat_enabled)				\
 		(qdev->perf_stat.__attr)++;			\
 })
 
+#define qperf_inc(__q, __attr)	QDIO_PERF_STAT_INC((__q)->irq_ptr, __attr)
+
 static inline void account_sbals_error(struct qdio_q *q, int count)
 {
 	q->q_stats.nr_sbal_error += count;
@@ -355,14 +355,10 @@ static inline int multicast_outbound(struct qdio_q *q)
 	for (i = 0; i < irq_ptr->nr_output_qs &&	\
 		({ q = irq_ptr->output_qs[i]; 1; }); i++)
 
-#define prev_buf(bufnr)	\
-	((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
-#define next_buf(bufnr)	\
-	((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
-#define add_buf(bufnr, inc) \
-	((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
-#define sub_buf(bufnr, dec) \
-	((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
+#define add_buf(bufnr, inc)	QDIO_BUFNR((bufnr) + (inc))
+#define next_buf(bufnr)		add_buf(bufnr, 1)
+#define sub_buf(bufnr, dec)	QDIO_BUFNR((bufnr) - (dec))
+#define prev_buf(bufnr)		sub_buf(bufnr, 1)
 
 #define queue_irqs_enabled(q)			\
 	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
@@ -375,8 +371,8 @@ extern u64 last_ai_time;
 void qdio_setup_thinint(struct qdio_irq *irq_ptr);
 int qdio_establish_thinint(struct qdio_irq *irq_ptr);
 void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
-void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
-void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_add_device(struct qdio_irq *irq_ptr);
+void tiqdio_remove_device(struct qdio_irq *irq_ptr);
 void tiqdio_inbound_processing(unsigned long q);
 int tiqdio_allocate_memory(void);
 void tiqdio_free_memory(void);
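The four open-coded ring-index macros collapse onto QDIO_BUFNR(), which is introduced elsewhere in this series; presumably it lives in asm/qdio.h along these lines (note it also parenthesizes its argument, which the old macros did not):

	#define QDIO_MAX_BUFFERS_PER_Q	128
	#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)
	#define QDIO_BUFNR(num)		((num) & QDIO_MAX_BUFFERS_MASK)

	/* wrap-around works in both directions, e.g. one step back from 0: */
	unsigned int prev = QDIO_BUFNR(0 - 1);	/* (-1 & 127) == 127 */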

diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c

@@ -131,7 +131,7 @@ again:
 	case 96:
 		/* not all buffers processed */
 		qperf_inc(q, eqbs_partial);
-		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
 		return count - tmp_count;
 	case 97:
@@ -423,9 +423,6 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count)
 static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
 {
-	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
-					SLSB_P_OUTPUT_NOT_INIT;
-
 	q->qdio_error = QDIO_ERROR_SLSB_STATE;
 
 	/* special handling for no target buffer empty */
@@ -433,7 +430,7 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
 	    q->sbal[start]->element[15].sflags == 0x10) {
 		qperf_inc(q, target_full);
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
-		goto set;
+		return;
 	}
 
 	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
@@ -442,13 +439,6 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
 	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
-
-set:
-	/*
-	 * Interrupts may be avoided as long as the error is present
-	 * so change the buffer state immediately to avoid starvation.
-	 */
-	set_buf_states(q, start, state, count);
 }
 
 static inline void inbound_primed(struct qdio_q *q, unsigned int start,
@@ -530,6 +520,11 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
 		return count;
 	case SLSB_P_INPUT_ERROR:
 		process_buffer_error(q, start, count);
+		/*
+		 * Interrupts may be avoided as long as the error is present
+		 * so change the buffer state immediately to avoid starvation.
+		 */
+		set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
@@ -963,7 +958,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 		/* skip if polling is enabled or already in work */
 		if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
-			qperf_inc(q, int_discarded);
+			QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
 			continue;
 		}
 		q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
@@ -1162,7 +1157,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
	 */
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-	tiqdio_remove_input_queues(irq_ptr);
+	tiqdio_remove_device(irq_ptr);
 	qdio_shutdown_queues(cdev);
 	qdio_shutdown_debug_entries(irq_ptr);
@@ -1284,6 +1279,7 @@ int qdio_allocate(struct qdio_initialize *init_data)
			    init_data->no_output_qs))
 		goto out_rel;
 
+	INIT_LIST_HEAD(&irq_ptr->entry);
 	init_data->cdev->private->qdio_data = irq_ptr;
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 	return 0;
@@ -1428,7 +1424,7 @@ int qdio_activate(struct ccw_device *cdev)
 	}
 
 	if (is_thinint_irq(irq_ptr))
-		tiqdio_add_input_queues(irq_ptr);
+		tiqdio_add_device(irq_ptr);
 
 	/* wait for subchannel to become active */
 	msleep(5);
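With the statistics macro keyed on the qdio_irq rather than a queue, interrupt-handling paths that never resolve a specific queue can count events directly; qperf_inc() remains as a thin wrapper for callers that do hold a queue:

	qperf_inc(q, eqbs_partial);			/* same as before */
	QDIO_PERF_STAT_INC(irq_ptr, int_discarded);	/* no struct qdio_q needed */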

diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c

@@ -150,7 +150,6 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
			return -ENOMEM;
 		}
 		irq_ptr_qs[i] = q;
-		INIT_LIST_HEAD(&q->entry);
 	}
 	return 0;
 }
@@ -179,7 +178,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
 	q->mask = 1 << (31 - i);
 	q->nr = i;
 	q->handler = handler;
-	INIT_LIST_HEAD(&q->entry);
 }
 
 static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,

diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c

@@ -39,14 +39,6 @@ struct indicator_t {
 static LIST_HEAD(tiq_list);
 static DEFINE_MUTEX(tiq_list_lock);
 
-/* Adapter interrupt definitions */
-static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating);
-
-static struct airq_struct tiqdio_airq = {
-	.handler = tiqdio_thinint_handler,
-	.isc = QDIO_AIRQ_ISC,
-};
-
 static struct indicator_t *q_indicators;
 
 u64 last_ai_time;
@@ -74,26 +66,20 @@ static void put_indicator(u32 *addr)
 	atomic_dec(&ind->count);
 }
 
-void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+void tiqdio_add_device(struct qdio_irq *irq_ptr)
 {
 	mutex_lock(&tiq_list_lock);
-	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+	list_add_rcu(&irq_ptr->entry, &tiq_list);
 	mutex_unlock(&tiq_list_lock);
 }
 
-void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+void tiqdio_remove_device(struct qdio_irq *irq_ptr)
 {
-	struct qdio_q *q;
-
-	q = irq_ptr->input_qs[0];
-	if (!q)
-		return;
-
 	mutex_lock(&tiq_list_lock);
-	list_del_rcu(&q->entry);
+	list_del_rcu(&irq_ptr->entry);
 	mutex_unlock(&tiq_list_lock);
 	synchronize_rcu();
-	INIT_LIST_HEAD(&q->entry);
+	INIT_LIST_HEAD(&irq_ptr->entry);
 }
 
 static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
@@ -154,7 +140,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
 		/* skip if polling is enabled or already in work */
 		if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
-			qperf_inc(q, int_discarded);
+			QDIO_PERF_STAT_INC(irq, int_discarded);
 			continue;
 		}
@@ -182,7 +168,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
 static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
 {
 	u32 si_used = clear_shared_ind();
-	struct qdio_q *q;
+	struct qdio_irq *irq;
 
 	last_ai_time = S390_lowcore.int_clock;
 	inc_irq_stat(IRQIO_QAI);
@@ -190,12 +176,8 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
 	/* protect tiq_list entries, only changed in activate or shutdown */
 	rcu_read_lock();
 
-	/* check for work on all inbound thinint queues */
-	list_for_each_entry_rcu(q, &tiq_list, entry) {
-		struct qdio_irq *irq;
-
+	list_for_each_entry_rcu(irq, &tiq_list, entry) {
 		/* only process queues from changed sets */
-		irq = q->irq_ptr;
 		if (unlikely(references_shared_dsci(irq))) {
 			if (!si_used)
 				continue;
@@ -204,11 +186,16 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
 
 		tiqdio_call_inq_handlers(irq);
 
-		qperf_inc(q, adapter_int);
+		QDIO_PERF_STAT_INC(irq, adapter_int);
 	}
 	rcu_read_unlock();
 }
 
+static struct airq_struct tiqdio_airq = {
+	.handler = tiqdio_thinint_handler,
+	.isc = QDIO_AIRQ_ISC,
+};
+
 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
 {
 	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
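Hanging the thinint list off struct qdio_irq removes the per-queue indirection, and moving tiqdio_airq below its handler drops the forward declaration. The removal path keeps the usual RCU-list discipline; the same code, annotated:

	void tiqdio_remove_device(struct qdio_irq *irq_ptr)
	{
		mutex_lock(&tiq_list_lock);		/* writers serialize on the mutex */
		list_del_rcu(&irq_ptr->entry);		/* readers may still be traversing */
		mutex_unlock(&tiq_list_lock);
		synchronize_rcu();			/* wait out every rcu_read_lock() */
		INIT_LIST_HEAD(&irq_ptr->entry);	/* node is now safe to reuse */
	}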

diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c

@@ -15,6 +15,7 @@
 #include <asm/scsw.h>
 
 #include "orb.h"
+#include "vfio_ccw_trace.h"
 
 /*
  * Max length for ccw chain.

diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c

@@ -15,9 +15,6 @@
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
 
-#define CREATE_TRACE_POINTS
-#include "vfio_ccw_trace.h"
-
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
 	struct subchannel *sch;
@@ -321,8 +318,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 	}
 
 err_out:
-	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, schid,
-			       io_region->ret_code, errstr);
+	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
+				      io_region->ret_code, errstr);
 }
 
 /*
@@ -344,6 +341,10 @@ static void fsm_async_request(struct vfio_ccw_private *private,
 		/* should not happen? */
 		cmd_region->ret_code = -EINVAL;
 	}
+
+	trace_vfio_ccw_fsm_async_request(get_schid(private),
+					 cmd_region->command,
+					 cmd_region->ret_code);
 }
 
 /*

diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h

@@ -135,6 +135,7 @@ extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
 static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
				      int event)
 {
+	trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
 	vfio_ccw_jumptable[private->state][event](private, event);
 }

diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c
new file mode 100644

@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoint definitions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Eric Farman <farman@linux.ibm.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request);
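This is the usual tracepoint split: any number of files include the header and get the inline trace_*() wrappers, while exactly one translation unit per module emits the definitions by setting CREATE_TRACE_POINTS first (that unit used to be vfio_ccw_fsm.c, which is why the Makefile's -I$(src) moved with it). Schematically:

	/* in any consumer, e.g. vfio_ccw_fsm.c: declarations only */
	#include "vfio_ccw_trace.h"

	/* in exactly one file, here vfio_ccw_trace.c: real definitions */
	#define CREATE_TRACE_POINTS
	#include "vfio_ccw_trace.h"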

diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h

@@ -7,6 +7,8 @@
  *		Halil Pasic <pasic@linux.vnet.ibm.com>
  */
 
+#include "cio.h"
+
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM vfio_ccw
 
@@ -15,28 +17,88 @@
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(vfio_ccw_io_fctl,
+TRACE_EVENT(vfio_ccw_fsm_async_request,
+	TP_PROTO(struct subchannel_id schid,
+		 int command,
+		 int errno),
+	TP_ARGS(schid, command, errno),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, sch_no)
+		__field(int, command)
+		__field(int, errno)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->sch_no = schid.sch_no;
+		__entry->command = command;
+		__entry->errno = errno;
+	),
+	TP_printk("schid=%x.%x.%04x command=0x%x errno=%d",
+		  __entry->cssid,
+		  __entry->ssid,
+		  __entry->sch_no,
+		  __entry->command,
+		  __entry->errno)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_event,
+	TP_PROTO(struct subchannel_id schid, int state, int event),
+	TP_ARGS(schid, state, event),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field(int, state)
+		__field(int, event)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->state = state;
+		__entry->event = event;
+	),
+	TP_printk("schid=%x.%x.%04x state=%d event=%d",
+		  __entry->cssid, __entry->ssid, __entry->schno,
+		  __entry->state,
+		  __entry->event)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_io_request,
 	TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
 	TP_ARGS(fctl, schid, errno, errstr),
 	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, sch_no)
 		__field(int, fctl)
-		__field_struct(struct subchannel_id, schid)
 		__field(int, errno)
 		__field(char*, errstr)
 	),
 	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->sch_no = schid.sch_no;
 		__entry->fctl = fctl;
-		__entry->schid = schid;
 		__entry->errno = errno;
 		__entry->errstr = errstr;
 	),
-	TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
-		  __entry->schid.cssid,
-		  __entry->schid.ssid,
-		  __entry->schid.sch_no,
+	TP_printk("schid=%x.%x.%04x fctl=0x%x errno=%d info=%s",
+		  __entry->cssid,
+		  __entry->ssid,
+		  __entry->sch_no,
 		  __entry->fctl,
 		  __entry->errno,
 		  __entry->errstr)

diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c

@@ -715,36 +715,18 @@ out:
 
 static void *_copy_key_from_user(void __user *ukey, size_t keylen)
 {
-	void *kkey;
-
 	if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
 		return ERR_PTR(-EINVAL);
-	kkey = kmalloc(keylen, GFP_KERNEL);
-	if (!kkey)
-		return ERR_PTR(-ENOMEM);
-	if (copy_from_user(kkey, ukey, keylen)) {
-		kfree(kkey);
-		return ERR_PTR(-EFAULT);
-	}
 
-	return kkey;
+	return memdup_user(ukey, keylen);
 }
 
 static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
 {
-	void *kapqns = NULL;
-	size_t nbytes;
+	if (!uapqns || nr_apqns == 0)
+		return NULL;
 
-	if (uapqns && nr_apqns > 0) {
-		nbytes = nr_apqns * sizeof(struct pkey_apqn);
-		kapqns = kmalloc(nbytes, GFP_KERNEL);
-		if (!kapqns)
-			return ERR_PTR(-ENOMEM);
-		if (copy_from_user(kapqns, uapqns, nbytes))
-			return ERR_PTR(-EFAULT);
-	}
-
-	return kapqns;
+	return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
 }
 
 static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
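memdup_user() bundles the kmalloc()/copy_from_user() pair and returns an ERR_PTR on failure, which also closes the leak visible in the removed _copy_apqns_from_user() code (its copy_from_user() error path returned without freeing kapqns). Typical caller pattern:

	struct pkey_apqn *apqns;

	apqns = memdup_user(uapqns, nr_apqns * sizeof(*apqns));
	if (IS_ERR(apqns))
		return PTR_ERR(apqns);
	/* ... use apqns ... */
	kfree(apqns);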

diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h

@@ -13,8 +13,6 @@
 #include "qeth_core.h"
 #include <linux/hashtable.h>
 
-#define QETH_SNIFF_AVAIL	0x0008
-
 enum qeth_ip_types {
 	QETH_IP_TYPE_NORMAL,
 	QETH_IP_TYPE_VIPA,

diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c

@@ -228,7 +228,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
 		break;
 	case 1:
 		qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
-		if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+		if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
 			card->options.sniffer = i;
 			if (card->qdio.init_pool.buf_count !=
			    QETH_IN_BUF_COUNT_MAX)
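QETH_SNIFF_AVAIL duplicated a qdioac2 facility bit that the common qdio headers now name; given the identical value, the shared define is presumably along these lines:

	/* assumed to live in arch/s390/include/asm/qdio.h */
	#define CHSC_AC2_SNIFFER_AVAILABLE	0x0008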