Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Author:	David S. Miller
Date:	2018-12-21 15:06:20 -08:00
commit ce28bb4453
63 changed files with 777 additions and 637 deletions

MAINTAINERS

@@ -2633,6 +2633,13 @@ S:	Maintained
 F:	Documentation/devicetree/bindings/sound/axentia,*
 F:	sound/soc/atmel/tse850-pcm5142.c
 
+AXXIA I2C CONTROLLER
+M:	Krzysztof Adamski <krzysztof.adamski@nokia.com>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-axxia.txt
+F:	drivers/i2c/busses/i2c-axxia.c
+
 AZ6007 DVB DRIVER
 M:	Mauro Carvalho Chehab <mchehab@kernel.org>
 L:	linux-media@vger.kernel.org
@@ -4050,7 +4057,7 @@ S:	Maintained
 F:	drivers/media/dvb-frontends/cxd2820r*
 
 CXGB3 ETHERNET DRIVER (CXGB3)
-M:	Santosh Raspatur <santosh@chelsio.com>
+M:	Arjun Vynipadath <arjun@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported
@@ -4079,7 +4086,7 @@ S:	Supported
 F:	drivers/crypto/chelsio
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Ganesh Goudar <ganeshgr@chelsio.com>
+M:	Arjun Vynipadath <arjun@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported

Makefile

@@ -1076,7 +1076,7 @@ scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
 # version.h and scripts_basic is processed / created.
 
 # Listed in dependency order
-PHONY += prepare archprepare macroprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
 
 # prepare3 is used to check if we are building in a separate output directory,
 # and if so do:
@@ -1099,9 +1099,7 @@ prepare2: prepare3 outputmakefile asm-generic
 prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
 	$(cmd_crmodverdir)
 
-macroprepare: prepare1 archmacros
-
-archprepare: archheaders archscripts macroprepare scripts_basic
+archprepare: archheaders archscripts prepare1 scripts_basic
 
 prepare0: archprepare gcc-plugins
 	$(Q)$(MAKE) $(build)=.
@@ -1177,9 +1175,6 @@ archheaders:
 PHONY += archscripts
 archscripts:
 
-PHONY += archmacros
-archmacros:
-
 PHONY += __headers
 __headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts
 	$(Q)$(MAKE) $(build)=scripts build_unifdef

arch/sparc/kernel/setup_32.c

@@ -310,25 +310,24 @@ void __init setup_arch(char **cmdline_p)
 
 	register_console(&prom_early_console);
 
-	printk("ARCH: ");
 	switch(sparc_cpu_model) {
 	case sun4m:
-		printk("SUN4M\n");
+		pr_info("ARCH: SUN4M\n");
 		break;
 	case sun4d:
-		printk("SUN4D\n");
+		pr_info("ARCH: SUN4D\n");
 		break;
 	case sun4e:
-		printk("SUN4E\n");
+		pr_info("ARCH: SUN4E\n");
 		break;
 	case sun4u:
-		printk("SUN4U\n");
+		pr_info("ARCH: SUN4U\n");
 		break;
 	case sparc_leon:
-		printk("LEON\n");
+		pr_info("ARCH: LEON\n");
 		break;
 	default:
-		printk("UNKNOWN!\n");
+		pr_info("ARCH: UNKNOWN!\n");
 		break;
 	}

arch/sparc/kernel/setup_64.c

@@ -642,9 +642,9 @@ void __init setup_arch(char **cmdline_p)
 	register_console(&prom_early_console);
 
 	if (tlb_type == hypervisor)
-		printk("ARCH: SUN4V\n");
+		pr_info("ARCH: SUN4V\n");
 	else
-		printk("ARCH: SUN4U\n");
+		pr_info("ARCH: SUN4U\n");
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;

arch/sparc/vdso/Makefile

@@ -34,7 +34,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
 CPPFLAGS_vdso.lds += -P -C
 
 VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \
-	-z max-page-size=8192 -z common-page-size=8192
+	-z max-page-size=8192
 
 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
 	$(call if_changed,vdso)

arch/x86/Makefile

@@ -232,13 +232,6 @@ archscripts: scripts_basic
 archheaders:
 	$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
 
-archmacros:
-	$(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
-
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
-export ASM_MACRO_FLAGS
-KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
-
 ###
 # Kernel objects

arch/x86/entry/calling.h

@@ -352,7 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
 .macro CALL_enter_from_user_mode
 #ifdef CONFIG_CONTEXT_TRACKING
 #ifdef HAVE_JUMP_LABEL
-	STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1
+	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
 	call enter_from_user_mode
 .Lafter_call_\@:

arch/x86/entry/vdso/Makefile

@@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
-	$(call ld-option, --build-id) -Bsymbolic
+	$(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
+	-Bsymbolic
 GCOV_PROFILE := n
 
 #

arch/x86/include/asm/alternative-asm.h

@@ -7,24 +7,16 @@
 #include <asm/asm.h>
 
 #ifdef CONFIG_SMP
-.macro LOCK_PREFIX_HERE
+.macro LOCK_PREFIX
+672:	lock
 	.pushsection .smp_locks,"a"
 	.balign 4
-	.long 671f - .		# offset
+	.long 672b - .
 	.popsection
-671:
-.endm
-
-.macro LOCK_PREFIX insn:vararg
-	LOCK_PREFIX_HERE
-	lock \insn
 .endm
 #else
-.macro LOCK_PREFIX_HERE
-.endm
-
-.macro LOCK_PREFIX insn:vararg
+.macro LOCK_PREFIX
 .endm
 #endif
 
 /*

arch/x86/include/asm/alternative.h

@@ -31,8 +31,15 @@
  */
 
 #ifdef CONFIG_SMP
-#define LOCK_PREFIX_HERE "LOCK_PREFIX_HERE\n\t"
-#define LOCK_PREFIX "LOCK_PREFIX "
+#define LOCK_PREFIX_HERE \
+		".pushsection .smp_locks,\"a\"\n"	\
+		".balign 4\n"				\
+		".long 671f - .\n" /* offset */		\
+		".popsection\n"				\
+		"671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
 
 #else /* ! CONFIG_SMP */
 #define LOCK_PREFIX_HERE ""
 #define LOCK_PREFIX ""

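For context, a minimal sketch (assumed, not part of this merge) of how the string form of LOCK_PREFIX is consumed by C inline asm; the .smp_locks entry records the prefix's address so the kernel can patch it out on uniprocessor boots:

	static inline void example_atomic_inc(int *v)
	{
		asm volatile(LOCK_PREFIX "incl %0" : "+m" (*v));
	}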
arch/x86/include/asm/asm.h

@@ -120,25 +120,12 @@
 /* Exception table entry */
 #ifdef __ASSEMBLY__
 # define _ASM_EXTABLE_HANDLE(from, to, handler)			\
-	ASM_EXTABLE_HANDLE from to handler
-
-.macro ASM_EXTABLE_HANDLE from:req to:req handler:req
-	.pushsection "__ex_table","a"
-	.balign 4
-	.long (\from) - .
-	.long (\to) - .
-	.long (\handler) - .
+	.pushsection "__ex_table","a" ;				\
+	.balign 4 ;						\
+	.long (from) - . ;					\
+	.long (to) - . ;					\
+	.long (handler) - . ;					\
 	.popsection
-.endm
-#else /* __ASSEMBLY__ */
-
-# define _ASM_EXTABLE_HANDLE(from, to, handler)			\
-	"ASM_EXTABLE_HANDLE from=" #from " to=" #to		\
-	" handler=\"" #handler "\"\n\t"
-
-/* For C file, we already have NOKPROBE_SYMBOL macro */
-#endif /* __ASSEMBLY__ */
 
 # define _ASM_EXTABLE(from, to)					\
 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
@@ -161,7 +148,6 @@
 	_ASM_PTR (entry);					\
 	.popsection
 
-#ifdef __ASSEMBLY__
 .macro ALIGN_DESTINATION
 	/* check for bad alignment of destination */
 	movl %edi,%ecx
@@ -185,7 +171,34 @@
 	_ASM_EXTABLE_UA(100b, 103b)
 	_ASM_EXTABLE_UA(101b, 103b)
 	.endm
-#endif /* __ASSEMBLY__ */
+
+#else
+# define _EXPAND_EXTABLE_HANDLE(x) #x
+# define _ASM_EXTABLE_HANDLE(from, to, handler)			\
+	" .pushsection \"__ex_table\",\"a\"\n"			\
+	" .balign 4\n"						\
+	" .long (" #from ") - .\n"				\
+	" .long (" #to ") - .\n"				\
+	" .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n"	\
+	" .popsection\n"
+
+# define _ASM_EXTABLE(from, to)					\
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+
+# define _ASM_EXTABLE_UA(from, to)				\
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+
+# define _ASM_EXTABLE_FAULT(from, to)				\
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+
+# define _ASM_EXTABLE_EX(from, to)				\
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
+
+# define _ASM_EXTABLE_REFCOUNT(from, to)			\
+	_ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
+
+/* For C file, we already have NOKPROBE_SYMBOL macro */
+#endif
 
 #ifndef __ASSEMBLY__
 /*

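Usage sketch for the C-side _ASM_EXTABLE_UA (assumed, mirrors the classic uaccess pattern rather than code from this merge): a fault on the load at 1: is routed by ex_handler_uaccess to the fixup at 3:, which reports -EFAULT and resumes at 2::

	static inline int example_get_user(unsigned int *val,
					   const unsigned int __user *ptr)
	{
		int err = 0;

		asm volatile("1:	movl (%2), %0\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:	movl $-14, %1\n"	/* -EFAULT */
			     "	jmp 2b\n"
			     ".previous\n"
			     _ASM_EXTABLE_UA(1b, 3b)
			     : "=r" (*val), "+r" (err)
			     : "r" (ptr));
		return err;
	}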
arch/x86/include/asm/bug.h

@@ -4,8 +4,6 @@
 
 #include <linux/stringify.h>
 
-#ifndef __ASSEMBLY__
-
 /*
  * Despite that some emulators terminate on UD2, we use it for WARN().
  *
@@ -22,15 +20,53 @@
 #define LEN_UD2		2
 
+#ifdef CONFIG_GENERIC_BUG
+
+#ifdef CONFIG_X86_32
+# define __BUG_REL(val)	".long " __stringify(val)
+#else
+# define __BUG_REL(val)	".long " __stringify(val) " - 2b"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
 #define _BUG_FLAGS(ins, flags)						\
 do {									\
-	asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 "	\
-		     "flags=%c2 size=%c3"				\
-		     : : "i" (__FILE__), "i" (__LINE__),		\
-			 "i" (flags),					\
+	asm volatile("1:\t" ins "\n"					\
+		     ".pushsection __bug_table,\"aw\"\n"		\
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
+		     "\t" __BUG_REL(%c0) "\t# bug_entry::file\n"	\
+		     "\t.word %c1"        "\t# bug_entry::line\n"	\
+		     "\t.word %c2"        "\t# bug_entry::flags\n"	\
+		     "\t.org 2b+%c3\n"					\
+		     ".popsection"					\
+		     : : "i" (__FILE__), "i" (__LINE__),		\
+			 "i" (flags),					\
 			 "i" (sizeof(struct bug_entry)));		\
 } while (0)
 
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+
+#define _BUG_FLAGS(ins, flags)						\
+do {									\
+	asm volatile("1:\t" ins "\n"					\
+		     ".pushsection __bug_table,\"aw\"\n"		\
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
+		     "\t.word %c0"        "\t# bug_entry::flags\n"	\
+		     "\t.org 2b+%c1\n"					\
+		     ".popsection"					\
+		     : : "i" (flags),					\
+			 "i" (sizeof(struct bug_entry)));		\
+} while (0)
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#else
+
+#define _BUG_FLAGS(ins, flags)  asm volatile(ins)
+
+#endif /* CONFIG_GENERIC_BUG */
+
 #define HAVE_ARCH_BUG
 #define BUG()							\
 do {								\
@@ -46,54 +82,4 @@ do {								\
 
 #include <asm-generic/bug.h>
 
-#else /* __ASSEMBLY__ */
-
-#ifdef CONFIG_GENERIC_BUG
-
-#ifdef CONFIG_X86_32
-.macro __BUG_REL val:req
-	.long \val
-.endm
-#else
-.macro __BUG_REL val:req
-	.long \val - 2b
-.endm
-#endif
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-1:	\ins
-	.pushsection __bug_table,"aw"
-2:	__BUG_REL val=1b	# bug_entry::bug_addr
-	__BUG_REL val=\file	# bug_entry::file
-	.word \line		# bug_entry::line
-	.word \flags		# bug_entry::flags
-	.org 2b+\size
-	.popsection
-.endm
-
-#else /* !CONFIG_DEBUG_BUGVERBOSE */
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-1:	\ins
-	.pushsection __bug_table,"aw"
-2:	__BUG_REL val=1b	# bug_entry::bug_addr
-	.word \flags		# bug_entry::flags
-	.org 2b+\size
-	.popsection
-.endm
-
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
-
-#else /* CONFIG_GENERIC_BUG */
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-	\ins
-.endm
-
-#endif /* CONFIG_GENERIC_BUG */
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* _ASM_X86_BUG_H */

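Note the __BUG_REL() split above: on 64-bit the bug_entry fields are stored as 32-bit PC-relative offsets (".long val - 2b") so the table stays compact despite 64-bit addresses. A sketch of the decode side, essentially what lib/bug.c does when relative pointers are configured:

	static unsigned long example_bug_addr(const struct bug_entry *bug)
	{
		/* Recover the absolute address from the relative field. */
		return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
	}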
arch/x86/include/asm/cpufeature.h

@@ -2,10 +2,10 @@
 #ifndef _ASM_X86_CPUFEATURE_H
 #define _ASM_X86_CPUFEATURE_H
 
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
 #include <asm/processor.h>
 
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
 #include <asm/asm.h>
 #include <linux/bitops.h>
@@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-	asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] "
-			  "cap_byte=\"%[cap_byte]\" "
-			  "feature=%P[feature] t_yes=%l[t_yes] "
-			  "t_no=%l[t_no] always=%P[always]"
+	asm_volatile_goto("1: jmp 6f\n"
+		 "2:\n"
+		 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+			 "((5f-4f) - (2b-1b)),0x90\n"
+		 "3:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"		/* src offset */
+		 " .long 4f - .\n"		/* repl offset */
+		 " .word %P[always]\n"		/* always replace */
+		 " .byte 3b - 1b\n"		/* src len */
+		 " .byte 5f - 4f\n"		/* repl len */
+		 " .byte 3b - 2b\n"		/* pad len */
+		 ".previous\n"
+		 ".section .altinstr_replacement,\"ax\"\n"
+		 "4: jmp %l[t_no]\n"
+		 "5:\n"
+		 ".previous\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"		/* src offset */
+		 " .long 0\n"			/* no replacement */
+		 " .word %P[feature]\n"		/* feature bit */
+		 " .byte 3b - 1b\n"		/* src len */
+		 " .byte 0\n"			/* repl len */
+		 " .byte 0\n"			/* pad len */
+		 ".previous\n"
+		 ".section .altinstr_aux,\"ax\"\n"
+		 "6:\n"
+		 " testb %[bitnum],%[cap_byte]\n"
+		 " jnz %l[t_yes]\n"
+		 " jmp %l[t_no]\n"
+		 ".previous\n"
 		 : : [feature]  "i" (bit),
 		     [always]   "i" (X86_FEATURE_ALWAYS),
 		     [bitnum]   "i" (1 << (bit & 7)),
@@ -199,44 +226,5 @@ t_no:
 #define CPU_FEATURE_TYPEVAL		boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
 					boot_cpu_data.x86_model
 
-#else /* __ASSEMBLY__ */
-
-.macro STATIC_CPU_HAS bitnum:req cap_byte:req feature:req t_yes:req t_no:req always:req
-1:
-	jmp 6f
-2:
-	.skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90
-3:
-	.section .altinstructions,"a"
-	.long 1b - .		/* src offset */
-	.long 4f - .		/* repl offset */
-	.word \always		/* always replace */
-	.byte 3b - 1b		/* src len */
-	.byte 5f - 4f		/* repl len */
-	.byte 3b - 2b		/* pad len */
-	.previous
-	.section .altinstr_replacement,"ax"
-4:
-	jmp \t_no
-5:
-	.previous
-	.section .altinstructions,"a"
-	.long 1b - .		/* src offset */
-	.long 0			/* no replacement */
-	.word \feature		/* feature bit */
-	.byte 3b - 1b		/* src len */
-	.byte 0			/* repl len */
-	.byte 0			/* pad len */
-	.previous
-	.section .altinstr_aux,"ax"
-6:
-	testb \bitnum,\cap_byte
-	jnz \t_yes
-	jmp \t_no
-	.previous
-.endm
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 #endif /* _ASM_X86_CPUFEATURE_H */

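The asm-goto block above is the body of static_cpu_has(); callers just write an ordinary branch and get boot-time patching. Minimal usage sketch (the wrapper function is hypothetical):

	static bool example_has_sse2(void)
	{
		/* Compiles to a jmp that alternatives patch at boot; the
		 * testb fallback in .altinstr_aux only runs before
		 * apply_alternatives() has executed.
		 */
		return static_cpu_has(X86_FEATURE_XMM2);
	}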
arch/x86/include/asm/fsgsbase.h

@@ -16,8 +16,8 @@
  */
 extern unsigned long x86_fsbase_read_task(struct task_struct *task);
 extern unsigned long x86_gsbase_read_task(struct task_struct *task);
-extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
-extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
+extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
+extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
 
 /* Helper functions for reading/writing FS/GS base */
@@ -39,8 +39,15 @@ static inline unsigned long x86_gsbase_read_cpu_inactive(void)
 	return gsbase;
 }
 
-extern void x86_fsbase_write_cpu(unsigned long fsbase);
-extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
+static inline void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+	wrmsrl(MSR_FS_BASE, fsbase);
+}
+
+static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+}
 
 #endif /* CONFIG_X86_64 */

arch/x86/include/asm/jump_label.h

@@ -2,6 +2,19 @@
 #ifndef _ASM_X86_JUMP_LABEL_H
 #define _ASM_X86_JUMP_LABEL_H
 
+#ifndef HAVE_JUMP_LABEL
+/*
+ * For better or for worse, if jump labels (the gcc extension) are missing,
+ * then the entire static branch patching infrastructure is compiled out.
+ * If that happens, the code in here will malfunction.  Raise a compiler
+ * error instead.
+ *
+ * In theory, jump labels and the static branch patching infrastructure
+ * could be decoupled to fix this.
+ */
+#error asm/jump_label.h included on a non-jump-label kernel
+#endif
+
 #define JUMP_LABEL_NOP_SIZE 5
 
 #ifdef CONFIG_X86_64
@@ -20,9 +33,15 @@
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-			  "branch=\"%c1\""
-			: :  "i" (key), "i" (branch) : : l_yes);
+	asm_volatile_goto("1:"
+		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+		".pushsection __jump_table,  \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
+		".popsection \n\t"
+		: :  "i" (key), "i" (branch) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
@@ -30,8 +49,14 @@ l_yes:
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-			  "branch=\"%c1\""
+	asm_volatile_goto("1:"
+		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+		"2:\n\t"
+		".pushsection __jump_table,  \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
+		".popsection \n\t"
 		: :  "i" (key), "i" (branch) : : l_yes);
 
 	return false;
@@ -41,26 +66,37 @@ l_yes:
 #else	/* __ASSEMBLY__ */
 
-.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
-.Lstatic_branch_nop_\@:
-	.byte STATIC_KEY_INIT_NOP
-.Lstatic_branch_no_after_\@:
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	/* Equivalent to "jmp.d32 \target" */
+	.byte		0xe9
+	.long		\target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.else
+	.byte		STATIC_KEY_INIT_NOP
+	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	.long .Lstatic_branch_nop_\@ - ., \l_yes - .
-	_ASM_PTR \key + \branch - .
+	.long		.Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR	\key - .
 	.popsection
 .endm
 
-.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
-.Lstatic_branch_jmp_\@:
-	.byte 0xe9
-	.long \l_yes - .Lstatic_branch_jmp_after_\@
-.Lstatic_branch_jmp_after_\@:
+.macro STATIC_JUMP_IF_FALSE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	.byte		STATIC_KEY_INIT_NOP
+	.else
+	/* Equivalent to "jmp.d32 \target" */
+	.byte		0xe9
+	.long		\target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	.long .Lstatic_branch_jmp_\@ - ., \l_yes - .
-	_ASM_PTR \key + \branch - .
+	.long		.Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR	\key + 1 - .
 	.popsection
 .endm

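For reference, the C-side consumer of arch_static_branch()/arch_static_branch_jump(); a minimal sketch with a hypothetical key, using the generic static-key API:

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(example_key);

	void example_enable_slow_path(void)
	{
		static_branch_enable(&example_key);	/* patches the NOP into a jmp */
	}

	bool example_fast_path(void)
	{
		return !static_branch_unlikely(&example_key);
	}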
arch/x86/include/asm/paravirt_types.h

@@ -348,11 +348,23 @@ extern struct paravirt_patch_template pv_ops;
 #define paravirt_clobber(clobber)		\
 	[paravirt_clobber] "i" (clobber)
 
+/*
+ * Generate some code, and mark it as patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber)	\
+	"771:\n\t" insn_string "\n" "772:\n"		\
+	".pushsection .parainstructions,\"a\"\n"	\
+	_ASM_ALIGN "\n"					\
+	_ASM_PTR " 771b\n"				\
+	" .byte " type "\n"				\
+	" .byte 772b-771b\n"				\
+	" .short " clobber "\n"				\
+	".popsection\n"
+
 /* Generate patchable code, with the default asm parameters. */
-#define paravirt_call							\
-	"PARAVIRT_CALL type=\"%c[paravirt_typenum]\""			\
-	" clobber=\"%c[paravirt_clobber]\""				\
-	" pv_opptr=\"%c[paravirt_opptr]\";"
+#define paravirt_alt(insn_string)					\
+	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
 
 /* Simple instruction patching code. */
 #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
@@ -372,6 +384,16 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len);
 
 int paravirt_disable_iospace(void);
 
+/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+#define PARAVIRT_CALL					\
+	ANNOTATE_RETPOLINE_SAFE				\
+	"call *%c[paravirt_opptr];"
+
 /*
  * These macros are intended to wrap calls through one of the paravirt
  * ops structs, so that they can be later identified and patched at
@@ -509,7 +531,7 @@ int paravirt_disable_iospace(void);
 		/* since this condition will never hold */		\
 		if (sizeof(rettype) > sizeof(unsigned long)) {		\
 			asm volatile(pre				\
-				     paravirt_call			\
+				     paravirt_alt(PARAVIRT_CALL)	\
 				     post				\
 				     : call_clbr, ASM_CALL_CONSTRAINT	\
 				     : paravirt_type(op),		\
@@ -519,7 +541,7 @@ int paravirt_disable_iospace(void);
 			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
 		} else {						\
 			asm volatile(pre				\
-				     paravirt_call			\
+				     paravirt_alt(PARAVIRT_CALL)	\
 				     post				\
 				     : call_clbr, ASM_CALL_CONSTRAINT	\
 				     : paravirt_type(op),		\
@@ -546,7 +568,7 @@ int paravirt_disable_iospace(void);
 		PVOP_VCALL_ARGS;					\
 		PVOP_TEST_NULL(op);					\
 		asm volatile(pre					\
-			     paravirt_call				\
+			     paravirt_alt(PARAVIRT_CALL)		\
 			     post					\
 			     : call_clbr, ASM_CALL_CONSTRAINT		\
 			     : paravirt_type(op),			\
@@ -664,26 +686,6 @@ struct paravirt_patch_site {
 extern struct paravirt_patch_site __parainstructions[],
 	__parainstructions_end[];
 
-#else	/* __ASSEMBLY__ */
-
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
-.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
-771:	ANNOTATE_RETPOLINE_SAFE
-	call *\pv_opptr
-772:	.pushsection .parainstructions,"a"
-	_ASM_ALIGN
-	_ASM_PTR 771b
-	.byte \type
-	.byte 772b-771b
-	.short \clobber
-	.popsection
-.endm
-
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* _ASM_X86_PARAVIRT_TYPES_H */

arch/x86/include/asm/pgtable_64_types.h

@@ -111,6 +111,11 @@ extern unsigned int ptrs_per_p4d;
  */
 #define MAXMEM			(1UL << MAX_PHYSMEM_BITS)
 
+#define GUARD_HOLE_PGD_ENTRY	-256UL
+#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
+#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
+#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
+
 #define LDT_PGD_ENTRY		-240UL
 #define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)

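Worked numbers for the new constants, assuming 4-level paging (PGDIR_SHIFT == 39):

	/* GUARD_HOLE_BASE_ADDR = -256UL << 39 = 0xffff800000000000
	 * GUARD_HOLE_SIZE     =   16UL << 39 = 8 TiB (16 PGD entries)
	 * GUARD_HOLE_END_ADDR = base + size  = 0xffff880000000000
	 *
	 * i.e. the hypervisor hole at the very start of the kernel half of
	 * the address space, previously open-coded in the page-table dumper
	 * as pgd_index(__PAGE_OFFSET) - 16.
	 */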
arch/x86/include/asm/refcount.h

@@ -4,41 +4,6 @@
  * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
  * PaX/grsecurity.
  */
-
-#ifdef __ASSEMBLY__
-
-#include <asm/asm.h>
-#include <asm/bug.h>
-
-.macro REFCOUNT_EXCEPTION counter:req
-	.pushsection .text..refcount
-111:	lea \counter, %_ASM_CX
-112:	ud2
-	ASM_UNREACHABLE
-	.popsection
-113:	_ASM_EXTABLE_REFCOUNT(112b, 113b)
-.endm
-
-/* Trigger refcount exception if refcount result is negative. */
-.macro REFCOUNT_CHECK_LT_ZERO counter:req
-	js 111f
-	REFCOUNT_EXCEPTION counter="\counter"
-.endm
-
-/* Trigger refcount exception if refcount result is zero or negative. */
-.macro REFCOUNT_CHECK_LE_ZERO counter:req
-	jz 111f
-	REFCOUNT_CHECK_LT_ZERO counter="\counter"
-.endm
-
-/* Trigger refcount exception unconditionally. */
-.macro REFCOUNT_ERROR counter:req
-	jmp 111f
-	REFCOUNT_EXCEPTION counter="\counter"
-.endm
-
-#else /* __ASSEMBLY__ */
-
 #include <linux/refcount.h>
 #include <asm/bug.h>
@@ -50,12 +15,35 @@
  * central refcount exception. The fixup address for the exception points
  * back to the regular execution flow in .text.
  */
+#define _REFCOUNT_EXCEPTION				\
+	".pushsection .text..refcount\n"		\
+	"111:\tlea %[var], %%" _ASM_CX "\n"		\
+	"112:\t" ASM_UD2 "\n"				\
+	ASM_UNREACHABLE					\
+	".popsection\n"					\
+	"113:\n"					\
+	_ASM_EXTABLE_REFCOUNT(112b, 113b)
+
+/* Trigger refcount exception if refcount result is negative. */
+#define REFCOUNT_CHECK_LT_ZERO				\
+	"js 111f\n\t"					\
+	_REFCOUNT_EXCEPTION
+
+/* Trigger refcount exception if refcount result is zero or negative. */
+#define REFCOUNT_CHECK_LE_ZERO				\
+	"jz 111f\n\t"					\
+	REFCOUNT_CHECK_LT_ZERO
+
+/* Trigger refcount exception unconditionally. */
+#define REFCOUNT_ERROR					\
+	"jmp 111f\n\t"					\
+	_REFCOUNT_EXCEPTION
+
 static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
-		"REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
-		: [counter] "+m" (r->refs.counter)
+		REFCOUNT_CHECK_LT_ZERO
+		: [var] "+m" (r->refs.counter)
 		: "ir" (i)
 		: "cc", "cx");
 }
@@ -63,32 +51,31 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 static __always_inline void refcount_inc(refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "incl %0\n\t"
-		"REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
-		: [counter] "+m" (r->refs.counter)
+		REFCOUNT_CHECK_LT_ZERO
+		: [var] "+m" (r->refs.counter)
 		: : "cc", "cx");
 }
 
 static __always_inline void refcount_dec(refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "decl %0\n\t"
-		"REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\""
-		: [counter] "+m" (r->refs.counter)
+		REFCOUNT_CHECK_LE_ZERO
+		: [var] "+m" (r->refs.counter)
 		: : "cc", "cx");
 }
 
 static __always_inline __must_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
 	return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
-					 "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+					 REFCOUNT_CHECK_LT_ZERO,
 					 r->refs.counter, e, "er", i, "cx");
 }
 
 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
 	return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
-					"REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+					REFCOUNT_CHECK_LT_ZERO,
 					r->refs.counter, e, "cx");
 }
@@ -106,8 +93,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		/* Did we try to increment from/to an undesirable state? */
 		if (unlikely(c < 0 || c == INT_MAX || result < c)) {
-			asm volatile("REFCOUNT_ERROR counter=\"%[counter]\""
-				     : : [counter] "m" (r->refs.counter)
+			asm volatile(REFCOUNT_ERROR
+				     : : [var] "m" (r->refs.counter)
 				     : "cc", "cx");
 			break;
 		}
@@ -122,6 +109,4 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 	return refcount_add_not_zero(1, r);
 }
 
-#endif /* __ASSEMBLY__ */
-
 #endif

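Usage sketch of the refcount API these exception macros back (the struct and helpers are hypothetical): on overflow or underflow the inline asm jumps into .text..refcount, which traps and saturates the counter instead of wrapping:

	struct example_obj {
		refcount_t refs;
	};

	static void example_get(struct example_obj *obj)
	{
		refcount_inc(&obj->refs);	/* REFCOUNT_CHECK_LT_ZERO on the result */
	}

	static bool example_put(struct example_obj *obj)
	{
		return refcount_dec_and_test(&obj->refs);	/* true for the last ref */
	}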
arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c

@@ -23,6 +23,7 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/kernfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -310,9 +311,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 		return -EINVAL;
 	buf[nbytes - 1] = '\0';
 
+	cpus_read_lock();
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 	if (!rdtgrp) {
 		rdtgroup_kn_unlock(of->kn);
+		cpus_read_unlock();
 		return -ENOENT;
 	}
 	rdt_last_cmd_clear();
@@ -367,6 +370,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 out:
 	rdtgroup_kn_unlock(of->kn);
+	cpus_read_unlock();
 	return ret ?: nbytes;
 }

arch/x86/kernel/cpu/mtrr/if.c

@@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 	struct mtrr_gentry gentry;
 	void __user *arg = (void __user *) __arg;
 
+	memset(&gentry, 0, sizeof(gentry));
+
 	switch (cmd) {
 	case MTRRIOC_ADD_ENTRY:
 	case MTRRIOC_SET_ENTRY:

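The memset() closes a kernel stack infoleak: only some ioctl cases fill gentry, and struct padding would otherwise reach userspace through the copy-out. The general pattern, sketched:

	struct mtrr_gentry gentry;

	memset(&gentry, 0, sizeof(gentry));	/* zero padding and unwritten fields */
	/* ... fill only the fields this ioctl actually produces ... */
	if (copy_to_user(arg, &gentry, sizeof(gentry)))
		return -EFAULT;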
arch/x86/kernel/macros.S

@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file includes headers whose assembly part includes macros which are
- * commonly used. The macros are precompiled into assmebly file which is later
- * assembled together with each compiled file.
- */
-
-#include <linux/compiler.h>
-#include <asm/refcount.h>
-#include <asm/alternative-asm.h>
-#include <asm/bug.h>
-#include <asm/paravirt.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/jump_label.h>

arch/x86/kernel/process_64.c

@@ -339,24 +339,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
 	return base;
 }
 
-void x86_fsbase_write_cpu(unsigned long fsbase)
-{
-	/*
-	 * Set the selector to 0 as a notion, that the segment base is
-	 * overwritten, which will be checked for skipping the segment load
-	 * during context switch.
-	 */
-	loadseg(FS, 0);
-	wrmsrl(MSR_FS_BASE, fsbase);
-}
-
-void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
-{
-	/* Set the selector to 0 for the same reason as %fs above. */
-	loadseg(GS, 0);
-	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
-}
-
 unsigned long x86_fsbase_read_task(struct task_struct *task)
 {
 	unsigned long fsbase;
@@ -385,38 +367,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
 	return gsbase;
 }
 
-int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
 {
-	/*
-	 * Not strictly needed for %fs, but do it for symmetry
-	 * with %gs
-	 */
-	if (unlikely(fsbase >= TASK_SIZE_MAX))
-		return -EPERM;
-
-	preempt_disable();
+	WARN_ON_ONCE(task == current);
+
 	task->thread.fsbase = fsbase;
-	if (task == current)
-		x86_fsbase_write_cpu(fsbase);
-	task->thread.fsindex = 0;
-	preempt_enable();
-
-	return 0;
 }
 
-int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
 {
-	if (unlikely(gsbase >= TASK_SIZE_MAX))
-		return -EPERM;
-
-	preempt_disable();
+	WARN_ON_ONCE(task == current);
+
 	task->thread.gsbase = gsbase;
-	if (task == current)
-		x86_gsbase_write_cpu_inactive(gsbase);
-	task->thread.gsindex = 0;
-	preempt_enable();
-
-	return 0;
 }
 
 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
@@ -754,11 +716,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 	switch (option) {
 	case ARCH_SET_GS: {
-		ret = x86_gsbase_write_task(task, arg2);
+		if (unlikely(arg2 >= TASK_SIZE_MAX))
+			return -EPERM;
+
+		preempt_disable();
+		/*
+		 * ARCH_SET_GS has always overwritten the index
+		 * and the base. Zero is the most sensible value
+		 * to put in the index, and is the only value that
+		 * makes any sense if FSGSBASE is unavailable.
+		 */
+		if (task == current) {
+			loadseg(GS, 0);
+			x86_gsbase_write_cpu_inactive(arg2);
+
+			/*
+			 * On non-FSGSBASE systems, save_base_legacy() expects
+			 * that we also fill in thread.gsbase.
+			 */
+			task->thread.gsbase = arg2;
+		} else {
+			task->thread.gsindex = 0;
+			x86_gsbase_write_task(task, arg2);
+		}
+		preempt_enable();
 		break;
 	}
 	case ARCH_SET_FS: {
-		ret = x86_fsbase_write_task(task, arg2);
+		/*
+		 * Not strictly needed for %fs, but do it for symmetry
+		 * with %gs
+		 */
+		if (unlikely(arg2 >= TASK_SIZE_MAX))
+			return -EPERM;
+
+		preempt_disable();
+		/*
+		 * Set the selector to 0 for the same reason
+		 * as %gs above.
+		 */
+		if (task == current) {
+			loadseg(FS, 0);
+			x86_fsbase_write_cpu(arg2);
+
+			/*
+			 * On non-FSGSBASE systems, save_base_legacy() expects
+			 * that we also fill in thread.fsbase.
+			 */
+			task->thread.fsbase = arg2;
+		} else {
+			task->thread.fsindex = 0;
+			x86_fsbase_write_task(task, arg2);
+		}
+		preempt_enable();
 		break;
 	}
 	case ARCH_GET_FS: {

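From userspace nothing changes: ARCH_SET_FS/ARCH_SET_GS keep their historical behaviour of zeroing the selector and writing the base, only now that logic lives in do_arch_prctl_64() itself. A sketch of the caller's view (assumed, illustrative):

	#include <asm/prctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int example_set_gs_base(unsigned long base)
	{
		/* base >= TASK_SIZE_MAX still fails with -EPERM */
		return syscall(SYS_arch_prctl, ARCH_SET_GS, base);
	}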
arch/x86/kernel/ptrace.c

@@ -397,11 +397,12 @@ static int putreg(struct task_struct *child,
 		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		/*
-		 * When changing the FS base, use the same
-		 * mechanism as for do_arch_prctl_64().
+		 * When changing the FS base, use do_arch_prctl_64()
+		 * to set the index to zero and to set the base
+		 * as requested.
 		 */
 		if (child->thread.fsbase != value)
-			return x86_fsbase_write_task(child, value);
+			return do_arch_prctl_64(child, ARCH_SET_FS, value);
 		return 0;
 	case offsetof(struct user_regs_struct,gs_base):
 		/*
@@ -410,7 +411,7 @@ static int putreg(struct task_struct *child,
 		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		if (child->thread.gsbase != value)
-			return x86_gsbase_write_task(child, value);
+			return do_arch_prctl_64(child, ARCH_SET_GS, value);
 		return 0;
 #endif
 	}

arch/x86/kvm/svm.c

@@ -2937,6 +2937,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
 	WARN_ON(mmu_is_nested(vcpu));
+
+	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
 	kvm_init_shadow_mmu(vcpu);
 	vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
 	vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
@@ -2949,6 +2951,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.mmu = &vcpu->arch.root_mmu;
 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 }
@@ -3458,7 +3461,6 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
 
 	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
-		kvm_mmu_unload(&svm->vcpu);
 		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
 		nested_svm_init_mmu_context(&svm->vcpu);
 	}

arch/x86/mm/dump_pagetables.c

@@ -55,10 +55,10 @@ struct addr_marker {
 enum address_markers_idx {
 	USER_SPACE_NR = 0,
 	KERNEL_SPACE_NR,
-	LOW_KERNEL_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 	LDT_NR,
 #endif
+	LOW_KERNEL_NR,
 	VMALLOC_START_NR,
 	VMEMMAP_START_NR,
 #ifdef CONFIG_KASAN
@@ -66,9 +66,6 @@ enum address_markers_idx {
 	KASAN_SHADOW_END_NR,
 #endif
 	CPU_ENTRY_AREA_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
-	LDT_NR,
-#endif
 #ifdef CONFIG_X86_ESPFIX64
 	ESPFIX_START_NR,
 #endif
@@ -512,11 +509,11 @@ static inline bool is_hypervisor_range(int idx)
 {
 #ifdef CONFIG_X86_64
 	/*
-	 * ffff800000000000 - ffff87ffffffffff is reserved for
-	 * the hypervisor.
+	 * A hole in the beginning of kernel address space reserved
+	 * for a hypervisor.
 	 */
-	return	(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
-		(idx <  pgd_index(__PAGE_OFFSET));
+	return	(idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
+		(idx <  pgd_index(GUARD_HOLE_END_ADDR));
 #else
 	return false;
 #endif

arch/x86/mm/pageattr.c

@@ -285,20 +285,16 @@ static void cpa_flush_all(unsigned long cache)
 	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
+static bool __inv_flush_all(int cache)
 {
 	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
-	WARN_ON(PAGE_ALIGN(start) != start);
 
 	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		cpa_flush_all(cache);
 		return true;
 	}
 
-	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
-
-	return !cache;
+	return false;
 }
 
 static void cpa_flush_range(unsigned long start, int numpages, int cache)
@@ -306,7 +302,14 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	unsigned int i, level;
 	unsigned long addr;
 
-	if (__cpa_flush_range(start, numpages, cache))
+	WARN_ON(PAGE_ALIGN(start) != start);
+
+	if (__inv_flush_all(cache))
+		return;
+
+	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
+
+	if (!cache)
 		return;
 
 	/*
@@ -332,7 +335,12 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
 {
 	unsigned int i, level;
 
-	if (__cpa_flush_range(baddr, numpages, cache))
+	if (__inv_flush_all(cache))
+		return;
+
+	flush_tlb_all();
+
+	if (!cache)
 		return;
 
 	/*

arch/x86/mm/pat.c

@@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address)
 	 * for a "decoy" virtual address (bit 63 clear) passed to
 	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
+	 *
+	 * Decoy addresses are not present for 32-bit builds, see
+	 * set_mce_nospec().
 	 */
-	return address & __PHYSICAL_MASK;
+	if (IS_ENABLED(CONFIG_X86_64))
+		return address & __PHYSICAL_MASK;
+	return address;
 }
 
 /*
@@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 	start = sanitize_phys(start);
 	end = sanitize_phys(end);
-	BUG_ON(start >= end); /* end is exclusive */
+	if (start >= end) {
+		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+				start, end - 1, cattr_name(req_type));
+		return -EINVAL;
+	}
 
 	if (!pat_enabled()) {
 		/* This is identical to page table setting without PAT */

arch/x86/xen/mmu_pv.c

@@ -648,19 +648,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
 			  unsigned long limit)
 {
 	int i, nr, flush = 0;
-	unsigned hole_low, hole_high;
+	unsigned hole_low = 0, hole_high = 0;
 
 	/* The limit is the last byte to be touched */
 	limit--;
 	BUG_ON(limit >= FIXADDR_TOP);
 
+#ifdef CONFIG_X86_64
 	/*
 	 * 64-bit has a great big hole in the middle of the address
-	 * space, which contains the Xen mappings.  On 32-bit these
-	 * will end up making a zero-sized hole and so is a no-op.
+	 * space, which contains the Xen mappings.
 	 */
-	hole_low = pgd_index(USER_LIMIT);
-	hole_high = pgd_index(PAGE_OFFSET);
+	hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
+	hole_high = pgd_index(GUARD_HOLE_END_ADDR);
+#endif
 
 	nr = pgd_index(limit) + 1;
 	for (i = 0; i < nr; i++) {

drivers/gpio/gpio-max7301.c

@@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
 	struct spi_device *spi = to_spi_device(dev);
 	u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
 
-	return spi_write(spi, (const u8 *)&word, sizeof(word));
+	return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
 }
 
 /* A read from the MAX7301 means two transfers; here, one message each */
@@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
 	struct spi_device *spi = to_spi_device(dev);
 
 	word = 0x8000 | (reg << 8);
-	ret = spi_write(spi, (const u8 *)&word, sizeof(word));
-	if (ret)
-		return ret;
-	/*
-	 * This relies on the fact, that a transfer with NULL tx_buf shifts out
-	 * zero bytes (=NOOP for MAX7301)
-	 */
-	ret = spi_read(spi, (u8 *)&word, sizeof(word));
+	ret = spi_write_then_read(spi, &word, sizeof(word), &word,
+				  sizeof(word));
 	if (ret)
 		return ret;
 	return word & 0xff;

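spi_write_then_read() copies through an internal DMA-safe bounce buffer, which is the point of the change: the old spi_write()/spi_read() calls handed a stack variable to the SPI core, whose buffers must be DMA-able. An equivalent fix kept on the old API would have looked roughly like this (sketch, not what was merged):

	u16 *word = kmalloc(sizeof(*word), GFP_KERNEL);	/* heap, hence DMA-safe */

	if (!word)
		return -ENOMEM;
	*word = 0x8000 | (reg << 8);
	ret = spi_write(spi, (const u8 *)word, sizeof(*word));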
drivers/gpio/gpio-mvebu.c

@@ -773,9 +773,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
 				 "marvell,armada-370-gpio"))
 		return 0;
 
-	if (IS_ERR(mvchip->clk))
-		return PTR_ERR(mvchip->clk);
-
 	/*
 	 * There are only two sets of PWM configuration registers for
 	 * all the GPIO lines on those SoCs which this driver reserves
@@ -786,6 +783,9 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
 	if (!res)
 		return 0;
 
+	if (IS_ERR(mvchip->clk))
+		return PTR_ERR(mvchip->clk);
+
 	/*
 	 * Use set A for lines of GPIO chip with id 0, B for GPIO chip
 	 * with id 1. Don't allow further GPIO chips to be used for PWM.

drivers/gpio/gpio-omap.c

@@ -32,7 +32,6 @@
 #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
 
 #define OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER	BIT(2)
-#define OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN	BIT(1)
 
 struct gpio_regs {
 	u32 irqenable1;
@@ -379,18 +378,9 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
 			readl_relaxed(bank->base + bank->regs->fallingdetect);
 
 	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
-		/* Defer wkup_en register update until we idle? */
-		if (bank->quirks & OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN) {
-			if (trigger)
-				bank->context.wake_en |= gpio_bit;
-			else
-				bank->context.wake_en &= ~gpio_bit;
-		} else {
-			omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit,
-				      trigger != 0);
-			bank->context.wake_en =
-				readl_relaxed(bank->base + bank->regs->wkup_en);
-		}
+		omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
+		bank->context.wake_en =
+			readl_relaxed(bank->base + bank->regs->wkup_en);
 	}
 
 	/* This part needs to be executed always for OMAP{34xx, 44xx} */
@@ -942,44 +932,6 @@ omap2_gpio_disable_level_quirk(struct gpio_bank *bank)
 			bank->base + bank->regs->risingdetect);
 }
 
-/*
- * On omap4 and later SoC variants a level interrupt with wkup_en
- * enabled blocks the GPIO functional clock from idling until the GPIO
- * instance has been reset. To avoid that, we must set wkup_en only for
- * idle for level interrupts, and clear level registers for the duration
- * of idle. The level interrupts will be still there on wakeup by their
- * nature.
- */
-static void __maybe_unused
-omap4_gpio_enable_level_quirk(struct gpio_bank *bank)
-{
-	/* Update wake register for idle, edge bits might be already set */
-	writel_relaxed(bank->context.wake_en,
-		       bank->base + bank->regs->wkup_en);
-
-	/* Clear level registers for idle */
-	writel_relaxed(0, bank->base + bank->regs->leveldetect0);
-	writel_relaxed(0, bank->base + bank->regs->leveldetect1);
-}
-
-static void __maybe_unused
-omap4_gpio_disable_level_quirk(struct gpio_bank *bank)
-{
-	/* Restore level registers after idle */
-	writel_relaxed(bank->context.leveldetect0,
-		       bank->base + bank->regs->leveldetect0);
-	writel_relaxed(bank->context.leveldetect1,
-		       bank->base + bank->regs->leveldetect1);
-
-	/* Clear saved wkup_en for level, it will be set for next idle again */
-	bank->context.wake_en &= ~(bank->context.leveldetect0 |
-				   bank->context.leveldetect1);
-
-	/* Update wake with only edge configuration */
-	writel_relaxed(bank->context.wake_en,
-		       bank->base + bank->regs->wkup_en);
-}
-
 /*---------------------------------------------------------------------*/
 
 static int omap_mpuio_suspend_noirq(struct device *dev)
@@ -1412,12 +1364,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
 			omap_set_gpio_dataout_mask_multiple;
 	}
 
-	if (bank->quirks & OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN) {
-		bank->funcs.idle_enable_level_quirk =
-			omap4_gpio_enable_level_quirk;
-		bank->funcs.idle_disable_level_quirk =
-			omap4_gpio_disable_level_quirk;
-	} else if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
+	if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
 		bank->funcs.idle_enable_level_quirk =
 			omap2_gpio_enable_level_quirk;
 		bank->funcs.idle_disable_level_quirk =
@@ -1806,8 +1753,7 @@ static const struct omap_gpio_platform_data omap4_pdata = {
 	.regs = &omap4_gpio_regs,
 	.bank_width = 32,
 	.dbck_flag = true,
-	.quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER |
-		  OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN,
+	.quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
 };
 
 static const struct of_device_id omap_gpio_match[] = {

View File

@ -19,11 +19,28 @@
#include "gpiolib.h" #include "gpiolib.h"
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
* @node: list-entry of the events list of the struct acpi_gpio_chip
* @handle: handle of ACPI method to execute when the IRQ triggers
* @handler: irq_handler to pass to request_irq when requesting the IRQ
* @pin: GPIO pin number on the gpio_chip
* @irq: Linux IRQ number for the event, for request_ / free_irq
* @irqflags: flags to pass to request_irq when requesting the IRQ
* @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
* @is_requested: True if request_irq has been done
* @desc: gpio_desc for the GPIO pin for this event
*/
struct acpi_gpio_event { struct acpi_gpio_event {
struct list_head node; struct list_head node;
acpi_handle handle; acpi_handle handle;
irq_handler_t handler;
unsigned int pin; unsigned int pin;
unsigned int irq; unsigned int irq;
unsigned long irqflags;
bool irq_is_wake;
bool irq_requested;
struct gpio_desc *desc; struct gpio_desc *desc;
}; };
@ -49,10 +66,10 @@ struct acpi_gpio_chip {
/* /*
* For gpiochips which call acpi_gpiochip_request_interrupts() before late_init * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
* (so builtin drivers) we register the ACPI GpioInt event handlers from a * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
* late_initcall_sync handler, so that other builtin drivers can register their * late_initcall_sync handler, so that other builtin drivers can register their
* OpRegions before the event handlers can run. This list contains gpiochips * OpRegions before the event handlers can run. This list contains gpiochips
* for which the acpi_gpiochip_request_interrupts() has been deferred. * for which the acpi_gpiochip_request_irqs() call has been deferred.
*/ */
static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
@ -133,8 +150,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
} }
EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource); EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
void *context) struct acpi_gpio_event *event)
{
int ret, value;
ret = request_threaded_irq(event->irq, NULL, event->handler,
event->irqflags, "ACPI:Event", event);
if (ret) {
dev_err(acpi_gpio->chip->parent,
"Failed to setup interrupt handler for %d\n",
event->irq);
return;
}
if (event->irq_is_wake)
enable_irq_wake(event->irq);
event->irq_requested = true;
/* Make sure we trigger the initial state of edge-triggered IRQs */
value = gpiod_get_raw_value_cansleep(event->desc);
if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
event->handler(event->irq, event);
}
static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
{
struct acpi_gpio_event *event;
list_for_each_entry(event, &acpi_gpio->events, node)
acpi_gpiochip_request_irq(acpi_gpio, event);
}
static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
void *context)
{ {
struct acpi_gpio_chip *acpi_gpio = context; struct acpi_gpio_chip *acpi_gpio = context;
struct gpio_chip *chip = acpi_gpio->chip; struct gpio_chip *chip = acpi_gpio->chip;
@ -143,8 +194,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
struct acpi_gpio_event *event; struct acpi_gpio_event *event;
irq_handler_t handler = NULL; irq_handler_t handler = NULL;
struct gpio_desc *desc; struct gpio_desc *desc;
unsigned long irqflags; int ret, pin, irq;
int ret, pin, irq, value;
if (!acpi_gpio_get_irq_resource(ares, &agpio)) if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK; return AE_OK;
@ -175,8 +225,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc); gpiod_direction_input(desc);
value = gpiod_get_value_cansleep(desc);
ret = gpiochip_lock_as_irq(chip, pin); ret = gpiochip_lock_as_irq(chip, pin);
if (ret) { if (ret) {
dev_err(chip->parent, "Failed to lock GPIO as interrupt\n"); dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@ -189,64 +237,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
goto fail_unlock_irq; goto fail_unlock_irq;
} }
irqflags = IRQF_ONESHOT;
if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
if (agpio->polarity == ACPI_ACTIVE_HIGH)
irqflags |= IRQF_TRIGGER_HIGH;
else
irqflags |= IRQF_TRIGGER_LOW;
} else {
switch (agpio->polarity) {
case ACPI_ACTIVE_HIGH:
irqflags |= IRQF_TRIGGER_RISING;
break;
case ACPI_ACTIVE_LOW:
irqflags |= IRQF_TRIGGER_FALLING;
break;
default:
irqflags |= IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING;
break;
}
}
event = kzalloc(sizeof(*event), GFP_KERNEL); event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event) if (!event)
goto fail_unlock_irq; goto fail_unlock_irq;
event->irqflags = IRQF_ONESHOT;
if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
if (agpio->polarity == ACPI_ACTIVE_HIGH)
event->irqflags |= IRQF_TRIGGER_HIGH;
else
event->irqflags |= IRQF_TRIGGER_LOW;
} else {
switch (agpio->polarity) {
case ACPI_ACTIVE_HIGH:
event->irqflags |= IRQF_TRIGGER_RISING;
break;
case ACPI_ACTIVE_LOW:
event->irqflags |= IRQF_TRIGGER_FALLING;
break;
default:
event->irqflags |= IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING;
break;
}
}
event->handle = evt_handle; event->handle = evt_handle;
event->handler = handler;
event->irq = irq; event->irq = irq;
event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
event->pin = pin; event->pin = pin;
event->desc = desc; event->desc = desc;
ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
"ACPI:Event", event);
if (ret) {
dev_err(chip->parent,
"Failed to setup interrupt handler for %d\n",
event->irq);
goto fail_free_event;
}
if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
enable_irq_wake(irq);
list_add_tail(&event->node, &acpi_gpio->events); list_add_tail(&event->node, &acpi_gpio->events);
/*
* Make sure we trigger the initial state of the IRQ when using RISING
* or FALLING. Note we run the handlers on late_init, the AML code
* may refer to OperationRegions from other (builtin) drivers which
* may be probed after us.
*/
if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
handler(event->irq, event);
return AE_OK; return AE_OK;
fail_free_event:
kfree(event);
fail_unlock_irq: fail_unlock_irq:
gpiochip_unlock_as_irq(chip, pin); gpiochip_unlock_as_irq(chip, pin);
fail_free_desc: fail_free_desc:
@ -283,6 +309,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
return; return;
acpi_walk_resources(handle, "_AEI",
acpi_gpiochip_alloc_event, acpi_gpio);
mutex_lock(&acpi_gpio_deferred_req_irqs_lock); mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
defer = !acpi_gpio_deferred_req_irqs_done; defer = !acpi_gpio_deferred_req_irqs_done;
if (defer) if (defer)
@ -293,8 +322,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (defer) if (defer)
return; return;
acpi_walk_resources(handle, "_AEI", acpi_gpiochip_request_irqs(acpi_gpio);
acpi_gpiochip_request_interrupt, acpi_gpio);
} }
EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts); EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
@ -331,10 +359,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc; struct gpio_desc *desc;
if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) if (event->irq_requested) {
disable_irq_wake(event->irq); if (event->irq_is_wake)
disable_irq_wake(event->irq);
free_irq(event->irq, event);
}
free_irq(event->irq, event);
desc = event->desc; desc = event->desc;
if (WARN_ON(IS_ERR(desc))) if (WARN_ON(IS_ERR(desc)))
continue; continue;
@@ -1200,23 +1231,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
 	return con_id == NULL;
 }
 
-/* Run deferred acpi_gpiochip_request_interrupts() */
-static int acpi_gpio_handle_deferred_request_interrupts(void)
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int acpi_gpio_handle_deferred_request_irqs(void)
 {
 	struct acpi_gpio_chip *acpi_gpio, *tmp;
 
 	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
 	list_for_each_entry_safe(acpi_gpio, tmp,
 				 &acpi_gpio_deferred_req_irqs_list,
-				 deferred_req_irqs_list_entry) {
-		acpi_handle handle;
-
-		handle = ACPI_HANDLE(acpi_gpio->chip->parent);
-		acpi_walk_resources(handle, "_AEI",
-				    acpi_gpiochip_request_interrupt, acpi_gpio);
-
-		list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
-	}
+				 deferred_req_irqs_list_entry)
+		acpi_gpiochip_request_irqs(acpi_gpio);
 
 	acpi_gpio_deferred_req_irqs_done = true;
 	mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
@@ -1224,4 +1248,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
 	return 0;
 }
 
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
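
The hunks above split the old request path into an allocation phase (run at walk time) and a request phase (run immediately or from the deferred late_initcall). A minimal sketch of the same "record now, request later" shape; struct ev, ev_list and ev_request_all() are illustrative names, not the driver's API:

/* Sketch only: defer the fragile request_irq() half to a late pass. */
#include <linux/interrupt.h>
#include <linux/list.h>

struct ev {
	struct list_head node;
	unsigned int irq;
	irq_handler_t handler;
	bool requested;
};

static LIST_HEAD(ev_list);	/* filled at probe time, cheap and safe */

static void ev_request_all(void)
{
	struct ev *e;

	list_for_each_entry(e, &ev_list, node) {
		/* The expensive half runs late, once per recorded event. */
		if (!request_irq(e->irq, e->handler, 0, "ev", e))
			e->requested = true;
	}
}
/* A late_initcall_sync() would then call ev_request_all(). */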

--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c

@@ -37,6 +37,7 @@
 #include <linux/pci.h>
 #include <linux/export.h>
+#include <linux/nospec.h>
 
 /**
  * DOC: getunique and setversion story
@@ -800,13 +801,17 @@ long drm_ioctl(struct file *filp,
 
 	if (is_driver_ioctl) {
 		/* driver ioctl */
-		if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+		unsigned int index = nr - DRM_COMMAND_BASE;
+
+		if (index >= dev->driver->num_ioctls)
 			goto err_i1;
-		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+		index = array_index_nospec(index, dev->driver->num_ioctls);
+		ioctl = &dev->driver->ioctls[index];
 	} else {
 		/* core ioctl */
 		if (nr >= DRM_CORE_IOCTL_COUNT)
 			goto err_i1;
+		nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
 		ioctl = &drm_ioctls[nr];
 	}
 
@@ -888,6 +893,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
 	if (nr >= DRM_CORE_IOCTL_COUNT)
 		return false;
+	nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
 	*flags = drm_ioctls[nr].flags;
 
 	return true;
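
All three call sites follow the standard Spectre-v1 pattern: bounds-check first, then clamp the index with array_index_nospec() so even a mis-speculated load stays in bounds. A stand-alone sketch; NR_HANDLERS and handlers[] are hypothetical, array_index_nospec() is the real kernel helper:

#include <linux/errno.h>
#include <linux/nospec.h>

#define NR_HANDLERS 16				/* hypothetical */
extern int (*handlers[NR_HANDLERS])(void);	/* hypothetical table */

static int lookup(unsigned int idx)
{
	if (idx >= NR_HANDLERS)			/* architectural check */
		return -EINVAL;

	/* Clamp idx so a speculated out-of-bounds load cannot happen. */
	idx = array_index_nospec(idx, NR_HANDLERS);

	return handlers[idx]();
}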

--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c

@@ -342,7 +342,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev)
 	pci_free_irq_vectors(pdev);
 }
 
-static int gpu_i2c_resume(struct device *dev)
+static __maybe_unused int gpu_i2c_resume(struct device *dev)
 {
 	struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
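
Marking the resume callback __maybe_unused silences the "defined but not used" warning when the PM ops referencing it are compiled out, without resorting to #ifdef. The usual pairing, sketched with the real SET_SYSTEM_SLEEP_PM_OPS() macro and hypothetical foo_* names:

#include <linux/device.h>
#include <linux/pm.h>

static __maybe_unused int foo_resume(struct device *dev)
{
	return 0;	/* re-initialize the hardware here */
}

/* SET_SYSTEM_SLEEP_PM_OPS() expands to nothing without CONFIG_PM_SLEEP,
 * leaving foo_resume() unreferenced - hence __maybe_unused above. */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, foo_resume)
};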

--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c

@@ -126,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
 {
 	struct omap4_keypad *keypad_data = dev_id;
 
-	if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
-		/* Disable interrupts */
-		kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
-				 OMAP4_VAL_IRQDISABLE);
+	if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
 		return IRQ_WAKE_THREAD;
-	}
 
 	return IRQ_NONE;
 }
@@ -173,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
 			 kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
 
-	/* enable interrupts */
-	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
-			 OMAP4_DEF_IRQENABLE_EVENTEN |
-			 OMAP4_DEF_IRQENABLE_LONGKEY);
-
 	return IRQ_HANDLED;
 }
 
@@ -214,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input)
 
 	disable_irq(keypad_data->irq);
 
-	/* Disable interrupts */
+	/* Disable interrupts and wake-up events */
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
 			 OMAP4_VAL_IRQDISABLE);
+	kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
 
 	/* clear pending interrupts */
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
@@ -365,7 +357,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
 	}
 
 	error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
-				     omap4_keypad_irq_thread_fn, 0,
+				     omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
 				     "omap4-keypad", keypad_data);
 	if (error) {
 		dev_err(&pdev->dev, "failed to register interrupt\n");
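
With IRQF_ONESHOT the IRQ core keeps the line masked from the hard handler's IRQ_WAKE_THREAD until the threaded handler returns, which is exactly what the removed manual enable/disable register writes were emulating. The pattern reduced to its skeleton, with hypothetical foo_* names:

#include <linux/interrupt.h>

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	/* No manual masking needed: IRQF_ONESHOT keeps the line
	 * masked until foo_thread_fn() has finished. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* slow work, may sleep */
	return IRQ_HANDLED;
}

/* at probe time:
 *	request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
 *			     IRQF_ONESHOT, "foo", dev);
 */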

--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c

@@ -1767,6 +1767,18 @@ static int elantech_smbus = IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) ?
 module_param_named(elantech_smbus, elantech_smbus, int, 0644);
 MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device.");
 
+static const char * const i2c_blacklist_pnp_ids[] = {
+	/*
+	 * These are known to not be working properly as bits are missing
+	 * in elan_i2c.
+	 */
+	"LEN2131", /* ThinkPad P52 w/ NFC */
+	"LEN2132", /* ThinkPad P52 */
+	"LEN2133", /* ThinkPad P72 w/ NFC */
+	"LEN2134", /* ThinkPad P72 */
+	NULL
+};
+
 static int elantech_create_smbus(struct psmouse *psmouse,
 				 struct elantech_device_info *info,
 				 bool leave_breadcrumbs)
@@ -1802,10 +1814,12 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
 
 	if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) {
 		/*
-		 * New ICs are enabled by default.
+		 * New ICs are enabled by default, unless mentioned in
+		 * i2c_blacklist_pnp_ids.
 		 * Old ICs are up to the user to decide.
 		 */
-		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+		    psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
 			return -ENXIO;
 	}
 

--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c

@@ -171,6 +171,7 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN0046", /* X250 */
 	"LEN004a", /* W541 */
 	"LEN005b", /* P50 */
+	"LEN005e", /* T560 */
 	"LEN0071", /* T480 */
 	"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
 	"LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -178,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN0096", /* X280 */
 	"LEN0097", /* X280 -> ALPS trackpoint */
 	"LEN200f", /* T450s */
+	"SYN3052", /* HP EliteBook 840 G4 */
 	"SYN3221", /* HP 15-ay000 */
 	NULL
 };

--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c

@@ -1101,10 +1101,10 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
 	ubi_wl_close(ubi);
 	ubi_free_internal_volumes(ubi);
 	vfree(ubi->vtbl);
-	put_mtd_device(ubi->mtd);
 	vfree(ubi->peb_buf);
 	vfree(ubi->fm_buf);
 	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
+	put_mtd_device(ubi->mtd);
 	put_device(&ubi->dev);
 	return 0;
 }

--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c

@@ -227,9 +227,9 @@ out_unlock:
 out_free:
 	kfree(desc);
 out_put_ubi:
-	ubi_put_device(ubi);
 	ubi_err(ubi, "cannot open device %d, volume %d, error %d",
 		ubi_num, vol_id, err);
+	ubi_put_device(ubi);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(ubi_open_volume);
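
Both UBI fixes address the same bug class: the last reference (put_mtd_device()/ubi_put_device()) was dropped before a log statement that still dereferences the object, a use-after-free ordering error. The rule generalizes: do all reads, including logging, before releasing the reference. A tiny sketch with a hypothetical refcounted obj:

/* Wrong: obj may be freed by obj_put(), then dereferenced. */
obj_put(obj);
pr_info("detached %d\n", obj->id);	/* use after free */

/* Right: consume the fields first, drop the reference last. */
pr_info("detached %d\n", obj->id);
obj_put(obj);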

--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c

@@ -746,7 +746,7 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 	}
 
 	return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
-				 ENTRYTYPE_NORMAL);
+				 ENTRYTYPE_LOCKED);
 }
 
 static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],

--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c

@@ -1125,7 +1125,8 @@ netxen_validate_firmware(struct netxen_adapter *adapter)
 		return -EINVAL;
 	}
 	val = nx_get_bios_version(adapter);
-	netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
+	if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
+		return -EIO;
 	if ((__force u32)val != bios) {
 		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
 			fw_name[fw_type]);

--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c

@@ -151,17 +151,18 @@ static bool qmimux_has_slaves(struct usbnet *dev)
 
 static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
-	unsigned int len, offset = sizeof(struct qmimux_hdr);
+	unsigned int len, offset = 0;
 	struct qmimux_hdr *hdr;
 	struct net_device *net;
 	struct sk_buff *skbn;
+	u8 qmimux_hdr_sz = sizeof(*hdr);
 
-	while (offset < skb->len) {
-		hdr = (struct qmimux_hdr *)skb->data;
+	while (offset + qmimux_hdr_sz < skb->len) {
+		hdr = (struct qmimux_hdr *)(skb->data + offset);
 		len = be16_to_cpu(hdr->pkt_len);
 
 		/* drop the packet, bogus length */
-		if (offset + len > skb->len)
+		if (offset + len + qmimux_hdr_sz > skb->len)
 			return 0;
 
 		/* control packet, we do not know what to do */
@@ -176,7 +177,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 			return 0;
 
 		skbn->dev = net;
-		switch (skb->data[offset] & 0xf0) {
+		switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
 		case 0x40:
 			skbn->protocol = htons(ETH_P_IP);
 			break;
@@ -188,12 +189,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 			goto skip;
 		}
 
-		skb_put_data(skbn, skb->data + offset, len);
+		skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len);
 		if (netif_rx(skbn) != NET_RX_SUCCESS)
 			return 0;
 
 skip:
-		offset += len + sizeof(struct qmimux_hdr);
+		offset += len + qmimux_hdr_sz;
 	}
 	return 1;
 }
@@ -1265,6 +1266,7 @@ static const struct usb_device_id products[] = {
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
+	{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)},	/* Fibocom NL678 series */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
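
The reworked loop is a textbook bounds-checked record walk: verify a full header fits before dereferencing it, then that header plus payload fit, then advance by both. A runnable user-space reduction of the same checks, with a hypothetical fixed-size header carrying a big-endian length in bytes 2..3:

#include <stdint.h>
#include <stddef.h>

#define HDRSZ 4 /* hypothetical record header size */

static int walk(const uint8_t *buf, size_t buflen)
{
	size_t off = 0;

	while (off + HDRSZ < buflen) {
		/* big-endian 16-bit payload length in header bytes 2..3 */
		size_t len = ((size_t)buf[off + 2] << 8) | buf[off + 3];

		/* header *and* payload must fit before touching either */
		if (off + HDRSZ + len > buflen)
			return 0;	/* bogus length: drop the batch */

		/* payload is buf[off + HDRSZ .. off + HDRSZ + len) */
		off += HDRSZ + len;
	}
	return 1;
}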

--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c

@@ -1394,22 +1394,43 @@ static inline struct console *SUNSU_CONSOLE(void)
 static enum su_type su_get_type(struct device_node *dp)
 {
 	struct device_node *ap = of_find_node_by_path("/aliases");
+	enum su_type rc = SU_PORT_PORT;
 
 	if (ap) {
 		const char *keyb = of_get_property(ap, "keyboard", NULL);
 		const char *ms = of_get_property(ap, "mouse", NULL);
+		struct device_node *match;
 
 		if (keyb) {
-			if (dp == of_find_node_by_path(keyb))
-				return SU_PORT_KBD;
+			match = of_find_node_by_path(keyb);
+
+			/*
+			 * The pointer is used as an identifier not
+			 * as a pointer, we can drop the refcount on
+			 * the of__node immediately after getting it.
+			 */
+			of_node_put(match);
+
+			if (dp == match) {
+				rc = SU_PORT_KBD;
+				goto out;
+			}
 		}
 		if (ms) {
-			if (dp == of_find_node_by_path(ms))
-				return SU_PORT_MS;
+			match = of_find_node_by_path(ms);
+
+			of_node_put(match);
+
+			if (dp == match) {
+				rc = SU_PORT_MS;
+				goto out;
+			}
 		}
 	}
 
-	return SU_PORT_PORT;
+out:
+	of_node_put(ap);
+	return rc;
 }
 
 static int su_probe(struct platform_device *op)
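
of_find_node_by_path() returns its node with an elevated refcount, so every successful lookup needs a matching of_node_put(). The fix above also uses the trick of dropping the refcount immediately when the pointer is only compared, never dereferenced. Minimal shape of the pattern, with a hypothetical path:

#include <linux/of.h>

static bool is_console_node(struct device_node *dp)
{
	struct device_node *match = of_find_node_by_path("/chosen/stdout");

	/* Only the pointer identity is used, so the reference can be
	 * dropped right away; never dereference match after this. */
	of_node_put(match);

	return dp == match;
}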

--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c

@@ -97,7 +97,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 	if (rc)
 		goto finished;
 
-	smb2_set_next_command(server, &rqst[num_rqst++]);
+	smb2_set_next_command(server, &rqst[num_rqst++], 0);
 
 	/* Operation */
 	switch (command) {
@@ -111,7 +111,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 				     SMB2_O_INFO_FILE, 0,
 				     sizeof(struct smb2_file_all_info) +
 					  PATH_MAX * 2, 0, NULL);
-		smb2_set_next_command(server, &rqst[num_rqst]);
+		smb2_set_next_command(server, &rqst[num_rqst], 0);
 		smb2_set_related(&rqst[num_rqst++]);
 		break;
 	case SMB2_OP_DELETE:
@@ -127,14 +127,14 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 		rqst[num_rqst].rq_iov = si_iov;
 		rqst[num_rqst].rq_nvec = 1;
 
-		size[0] = 8;
+		size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
 		data[0] = &delete_pending[0];
 
 		rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
 					COMPOUND_FID, current->tgid,
 					FILE_DISPOSITION_INFORMATION,
 					SMB2_O_INFO_FILE, 0, data, size);
-		smb2_set_next_command(server, &rqst[num_rqst]);
+		smb2_set_next_command(server, &rqst[num_rqst], 1);
 		smb2_set_related(&rqst[num_rqst++]);
 		break;
 	case SMB2_OP_SET_EOF:
@@ -149,7 +149,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 					COMPOUND_FID, current->tgid,
 					FILE_END_OF_FILE_INFORMATION,
 					SMB2_O_INFO_FILE, 0, data, size);
-		smb2_set_next_command(server, &rqst[num_rqst]);
+		smb2_set_next_command(server, &rqst[num_rqst], 0);
 		smb2_set_related(&rqst[num_rqst++]);
 		break;
 	case SMB2_OP_SET_INFO:
@@ -165,7 +165,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 					COMPOUND_FID, current->tgid,
 					FILE_BASIC_INFORMATION,
 					SMB2_O_INFO_FILE, 0, data, size);
-		smb2_set_next_command(server, &rqst[num_rqst]);
+		smb2_set_next_command(server, &rqst[num_rqst], 0);
 		smb2_set_related(&rqst[num_rqst++]);
 		break;
 	case SMB2_OP_RENAME:
@@ -189,7 +189,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 					COMPOUND_FID, current->tgid,
 					FILE_RENAME_INFORMATION,
 					SMB2_O_INFO_FILE, 0, data, size);
-		smb2_set_next_command(server, &rqst[num_rqst]);
+		smb2_set_next_command(server, &rqst[num_rqst], 0);
 		smb2_set_related(&rqst[num_rqst++]);
 		break;
 	case SMB2_OP_HARDLINK:
@@ -213,7 +213,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 					COMPOUND_FID, current->tgid,
 					FILE_LINK_INFORMATION,
 					SMB2_O_INFO_FILE, 0, data, size);
-		smb2_set_next_command(server, &rqst[num_rqst]);
+		smb2_set_next_command(server, &rqst[num_rqst], 0);
 		smb2_set_related(&rqst[num_rqst++]);
 		break;
 	default:

--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c

@@ -1194,7 +1194,7 @@ smb2_ioctl_query_info(const unsigned int xid,
 	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
 	if (rc)
 		goto iqinf_exit;
-	smb2_set_next_command(ses->server, &rqst[0]);
+	smb2_set_next_command(ses->server, &rqst[0], 0);
 
 	/* Query */
 	memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1208,7 +1208,7 @@ smb2_ioctl_query_info(const unsigned int xid,
 				  qi.output_buffer_length, buffer);
 	if (rc)
 		goto iqinf_exit;
-	smb2_set_next_command(ses->server, &rqst[1]);
+	smb2_set_next_command(ses->server, &rqst[1], 0);
 	smb2_set_related(&rqst[1]);
 
 	/* Close */
@@ -1761,16 +1761,23 @@ smb2_set_related(struct smb_rqst *rqst)
 char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
 
 void
-smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+		      bool has_space_for_padding)
 {
 	struct smb2_sync_hdr *shdr;
 	unsigned long len = smb_rqst_len(server, rqst);
 
 	/* SMB headers in a compound are 8 byte aligned. */
 	if (len & 7) {
-		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
-		rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
-		rqst->rq_nvec++;
+		if (has_space_for_padding) {
+			len = rqst->rq_iov[rqst->rq_nvec - 1].iov_len;
+			rqst->rq_iov[rqst->rq_nvec - 1].iov_len =
+				(len + 7) & ~7;
+		} else {
+			rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
+			rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
+			rqst->rq_nvec++;
+		}
 		len = smb_rqst_len(server, rqst);
 	}
 
@@ -1820,7 +1827,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
 	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &srch_path);
 	if (rc)
 		goto qfs_exit;
-	smb2_set_next_command(server, &rqst[0]);
+	smb2_set_next_command(server, &rqst[0], 0);
 
 	memset(&qi_iov, 0, sizeof(qi_iov));
 	rqst[1].rq_iov = qi_iov;
@@ -1833,7 +1840,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
 			  NULL);
 	if (rc)
 		goto qfs_exit;
-	smb2_set_next_command(server, &rqst[1]);
+	smb2_set_next_command(server, &rqst[1], 0);
 	smb2_set_related(&rqst[1]);
 
 	memset(&close_iov, 0, sizeof(close_iov));
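
The new has_space_for_padding branch rounds the final iov up in place instead of appending a separate padding iov; both strategies land on the same 8-byte alignment. The arithmetic, runnable in isolation: rounding up is (len + 7) & ~7, and the pad the other branch appends is 8 - (len & 7):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	for (size_t len = 1; len <= 64; len++) {
		size_t rounded = (len + 7) & ~(size_t)7;  /* grow last iov */
		size_t padded  = (len & 7) ? len + (8 - (len & 7)) : len;

		/* Both strategies reach the same aligned total. */
		assert(rounded == padded && (rounded & 7) == 0);
	}
	return 0;
}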

--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h

@@ -117,7 +117,8 @@ extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
 extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
 				  struct smb_rqst *rqst);
 extern void smb2_set_next_command(struct TCP_Server_Info *server,
-				  struct smb_rqst *rqst);
+				  struct smb_rqst *rqst,
+				  bool has_space_for_padding);
 extern void smb2_set_related(struct smb_rqst *rqst);
 
 /*

--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig

@@ -12,9 +12,10 @@ config UBIFS_FS
	help
	  UBIFS is a file system for flash devices which works on top of UBI.
 
+if UBIFS_FS
+
 config UBIFS_FS_ADVANCED_COMPR
	bool "Advanced compression options"
-	depends on UBIFS_FS
	help
	  This option allows to explicitly choose which compressions, if any,
	  are enabled in UBIFS. Removing compressors means inability to read
@@ -24,7 +25,6 @@ config UBIFS_FS_ADVANCED_COMPR
 
 config UBIFS_FS_LZO
	bool "LZO compression support" if UBIFS_FS_ADVANCED_COMPR
-	depends on UBIFS_FS
	default y
	help
	   LZO compressor is generally faster than zlib but compresses worse.
@@ -32,14 +32,12 @@ config UBIFS_FS_LZO
 
 config UBIFS_FS_ZLIB
	bool "ZLIB compression support" if UBIFS_FS_ADVANCED_COMPR
-	depends on UBIFS_FS
	default y
	help
	  Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
 
 config UBIFS_ATIME_SUPPORT
-	bool "Access time support" if UBIFS_FS
-	depends on UBIFS_FS
+	bool "Access time support"
	default n
	help
	  Originally UBIFS did not support atime, because it looked like a bad idea due
@@ -54,7 +52,6 @@ config UBIFS_ATIME_SUPPORT
 
 config UBIFS_FS_XATTR
	bool "UBIFS XATTR support"
-	depends on UBIFS_FS
	default y
	help
	  Saying Y here includes support for extended attributes (xattrs).
@@ -65,7 +62,7 @@ config UBIFS_FS_XATTR
 
 config UBIFS_FS_ENCRYPTION
	bool "UBIFS Encryption"
-	depends on UBIFS_FS && UBIFS_FS_XATTR && BLOCK
+	depends on UBIFS_FS_XATTR && BLOCK
	select FS_ENCRYPTION
	default n
	help
@@ -76,7 +73,7 @@ config UBIFS_FS_ENCRYPTION
 
 config UBIFS_FS_SECURITY
	bool "UBIFS Security Labels"
-	depends on UBIFS_FS && UBIFS_FS_XATTR
+	depends on UBIFS_FS_XATTR
	default y
	help
	  Security labels provide an access control facility to support Linux
@@ -89,6 +86,7 @@ config UBIFS_FS_SECURITY
 
 config UBIFS_FS_AUTHENTICATION
	bool "UBIFS authentication support"
+	depends on KEYS
	select CRYPTO_HMAC
	help
	  Enable authentication support for UBIFS. This feature offers protection
@@ -96,3 +94,5 @@ config UBIFS_FS_AUTHENTICATION
	  If you say yes here you should also select a hashing algorithm such as
	  sha256, these are not selected automatically since there are many
	  different options.
+
+endif # UBIFS_FS

--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c

@@ -1675,6 +1675,12 @@ int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash)
 	if (!ubifs_authenticated(c))
 		return 0;
 
+	if (!c->nroot) {
+		err = ubifs_read_nnode(c, NULL, 0);
+		if (err)
+			return err;
+	}
+
 	desc = ubifs_hash_get_desc(c);
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
@@ -1685,12 +1691,6 @@ int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash)
 		goto out;
 	}
 
-	if (!c->nroot) {
-		err = ubifs_read_nnode(c, NULL, 0);
-		if (err)
-			return err;
-	}
-
 	cnode = (struct ubifs_cnode *)c->nroot;
 
 	while (cnode) {

--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c

@@ -212,6 +212,38 @@ static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
 	return ubifs_tnc_remove_range(c, &min_key, &max_key);
 }
 
+/**
+ * inode_still_linked - check whether inode in question will be re-linked.
+ * @c: UBIFS file-system description object
+ * @rino: replay entry to test
+ *
+ * O_TMPFILE files can be re-linked, this means link count goes from 0 to 1.
+ * This case needs special care, otherwise all references to the inode will
+ * be removed upon the first replay entry of an inode with link count 0
+ * is found.
+ */
+static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
+{
+	struct replay_entry *r;
+
+	ubifs_assert(c, rino->deletion);
+	ubifs_assert(c, key_type(c, &rino->key) == UBIFS_INO_KEY);
+
+	/*
+	 * Find the most recent entry for the inode behind @rino and check
+	 * whether it is a deletion.
+	 */
+	list_for_each_entry_reverse(r, &c->replay_list, list) {
+		ubifs_assert(c, r->sqnum >= rino->sqnum);
+		if (key_inum(c, &r->key) == key_inum(c, &rino->key))
+			return r->deletion == 0;
+	}
+
+	ubifs_assert(c, 0);
+	return false;
+}
+
 /**
  * apply_replay_entry - apply a replay entry to the TNC.
  * @c: UBIFS file-system description object
@@ -239,6 +271,11 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
 		{
 			ino_t inum = key_inum(c, &r->key);
 
+			if (inode_still_linked(c, r)) {
+				err = 0;
+				break;
+			}
+
 			err = ubifs_tnc_remove_ino(c, inum);
 			break;
 		}
@@ -533,6 +570,28 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
 	return data == 0xFFFFFFFF;
 }
 
+/* authenticate_sleb_hash and authenticate_sleb_hmac are split out for stack usage */
+static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash)
+{
+	SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
+
+	hash_desc->tfm = c->hash_tfm;
+	hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	ubifs_shash_copy_state(c, log_hash, hash_desc);
+	return crypto_shash_final(hash_desc, hash);
+}
+
+static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac)
+{
+	SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
+
+	hmac_desc->tfm = c->hmac_tfm;
+	hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
+}
+
 /**
  * authenticate_sleb - authenticate one scan LEB
  * @c: UBIFS file-system description object
@@ -574,21 +633,12 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 
 		if (snod->type == UBIFS_AUTH_NODE) {
 			struct ubifs_auth_node *auth = snod->node;
-			SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
-			SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
 
-			hash_desc->tfm = c->hash_tfm;
-			hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-			ubifs_shash_copy_state(c, log_hash, hash_desc);
-			err = crypto_shash_final(hash_desc, hash);
+			err = authenticate_sleb_hash(c, log_hash, hash);
 			if (err)
 				goto out;
 
-			hmac_desc->tfm = c->hmac_tfm;
-			hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-			err = crypto_shash_digest(hmac_desc, hash, c->hash_len,
-						  hmac);
+			err = authenticate_sleb_hmac(c, hash, hmac);
 			if (err)
 				goto out;
 
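
SHASH_DESC_ON_STACK() places a tfm-dependent, potentially large descriptor on the stack; moving each one into its own helper keeps the two buffers from being live in the same frame. The general trick, sketched with hypothetical fixed-size scratch buffers (noinline prevents the compiler from merging the frames back together):

#include <linux/types.h>

static noinline int step_hash(const u8 *in, u8 *out)
{
	u8 scratch[512];	/* large temporary, like SHASH_DESC_ON_STACK */

	scratch[0] = in[0];	/* ... real work here ... */
	out[0] = scratch[0];
	return 0;
}

static noinline int step_hmac(const u8 *in, u8 *out)
{
	u8 scratch[512];	/* second large temporary */

	scratch[0] = in[0];
	out[0] = scratch[0];
	return 0;
}

static int authenticate(const u8 *in, u8 *mid, u8 *out)
{
	int err = step_hash(in, mid);	/* peak stack: one scratch buffer */

	return err ?: step_hmac(mid, out);	/* never two at once */
}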

--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c

@@ -63,6 +63,17 @@
 /* Default time granularity in nanoseconds */
 #define DEFAULT_TIME_GRAN 1000000000
 
+static int get_default_compressor(struct ubifs_info *c)
+{
+	if (ubifs_compr_present(c, UBIFS_COMPR_LZO))
+		return UBIFS_COMPR_LZO;
+
+	if (ubifs_compr_present(c, UBIFS_COMPR_ZLIB))
+		return UBIFS_COMPR_ZLIB;
+
+	return UBIFS_COMPR_NONE;
+}
+
 /**
  * create_default_filesystem - format empty UBI volume.
  * @c: UBIFS file-system description object
@@ -207,7 +218,7 @@ static int create_default_filesystem(struct ubifs_info *c)
 	if (c->mount_opts.override_compr)
 		sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
 	else
-		sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
+		sup->default_compr = cpu_to_le16(get_default_compressor(c));
 
 	generate_random_uuid(sup->uuid);

--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h

@@ -17,8 +17,10 @@
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 
-struct bug_entry {
+#ifdef CONFIG_BUG
+
 #ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 	unsigned long	bug_addr;
 #else
@@ -33,10 +35,8 @@ struct bug_entry {
 	unsigned short	line;
 #endif
 	unsigned short	flags;
-#endif	/* CONFIG_GENERIC_BUG */
 };
+#endif	/* CONFIG_GENERIC_BUG */
 
-#ifdef CONFIG_BUG
-
 /*
  * Don't use BUG() or BUG_ON() unless there's really no way out; one

--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h

@@ -99,13 +99,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  * unique, to convince GCC not to merge duplicate inline asm statements.
  */
 #define annotate_reachable() ({					\
-	asm volatile("ANNOTATE_REACHABLE counter=%c0"		\
-		     : : "i" (__COUNTER__));			\
+	asm volatile("%c0:\n\t"					\
+		     ".pushsection .discard.reachable\n\t"	\
+		     ".long %c0b - .\n\t"			\
+		     ".popsection\n\t" : : "i" (__COUNTER__));	\
 })
 #define annotate_unreachable() ({				\
-	asm volatile("ANNOTATE_UNREACHABLE counter=%c0"		\
-		     : : "i" (__COUNTER__));			\
+	asm volatile("%c0:\n\t"					\
+		     ".pushsection .discard.unreachable\n\t"	\
+		     ".long %c0b - .\n\t"			\
+		     ".popsection\n\t" : : "i" (__COUNTER__));	\
 })
+#define ASM_UNREACHABLE						\
+	"999:\n\t"						\
+	".pushsection .discard.unreachable\n\t"			\
+	".long 999b - .\n\t"					\
+	".popsection\n\t"
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
@@ -293,45 +302,6 @@ static inline void *offset_to_ptr(const int *off)
 	return (void *)((unsigned long)off + *off);
 }
 
-#else /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#ifndef LINKER_SCRIPT
-
-#ifdef CONFIG_STACK_VALIDATION
-.macro ANNOTATE_UNREACHABLE counter:req
-\counter:
-	.pushsection .discard.unreachable
-	.long \counter\()b -.
-	.popsection
-.endm
-
-.macro ANNOTATE_REACHABLE counter:req
-\counter:
-	.pushsection .discard.reachable
-	.long \counter\()b -.
-	.popsection
-.endm
-
-.macro ASM_UNREACHABLE
-999:
-	.pushsection .discard.unreachable
-	.long 999b - .
-	.popsection
-.endm
-#else /* CONFIG_STACK_VALIDATION */
-.macro ANNOTATE_UNREACHABLE counter:req
-.endm
-
-.macro ANNOTATE_REACHABLE counter:req
-.endm
-
-.macro ASM_UNREACHABLE
-.endm
-#endif /* CONFIG_STACK_VALIDATION */
-#endif /* LINKER_SCRIPT */
-#endif /* __KERNEL__ */
-
 #endif /* __ASSEMBLY__ */
 
 /* Compile time object size, -1 for unknown */

--- a/kernel/futex.c
+++ b/kernel/futex.c

@@ -1148,11 +1148,65 @@ out_error:
 	return ret;
 }
 
+static int handle_exit_race(u32 __user *uaddr, u32 uval,
+			    struct task_struct *tsk)
+{
+	u32 uval2;
+
+	/*
+	 * If PF_EXITPIDONE is not yet set, then try again.
+	 */
+	if (tsk && !(tsk->flags & PF_EXITPIDONE))
+		return -EAGAIN;
+
+	/*
+	 * Reread the user space value to handle the following situation:
+	 *
+	 * CPU0				CPU1
+	 *
+	 * sys_exit()			sys_futex()
+	 *  do_exit()			 futex_lock_pi()
+	 *                                futex_lock_pi_atomic()
+	 *   exit_signals(tsk)		    No waiters:
+	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
+	 *  mm_release(tsk)		    Set waiter bit
+	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
+	 *      Set owner died		    attach_to_pi_owner() {
+	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
+	 *   }				     if (!tsk->flags & PF_EXITING) {
+	 *   ...			       attach();
+	 *   tsk->flags |= PF_EXITPIDONE;    } else {
+	 *				       if (!(tsk->flags & PF_EXITPIDONE))
+	 *					 return -EAGAIN;
+	 *				       return -ESRCH; <--- FAIL
+	 *				     }
+	 *
+	 * Returning ESRCH unconditionally is wrong here because the
+	 * user space value has been changed by the exiting task.
+	 *
+	 * The same logic applies to the case where the exiting task is
+	 * already gone.
+	 */
+	if (get_futex_value_locked(&uval2, uaddr))
+		return -EFAULT;
+
+	/* If the user space value has changed, try again. */
+	if (uval2 != uval)
+		return -EAGAIN;
+
+	/*
+	 * The exiting task did not have a robust list, the robust list was
+	 * corrupted or the user space value in *uaddr is simply bogus.
+	 * Give up and tell user space.
+	 */
+	return -ESRCH;
+}
+
 /*
  * Lookup the task for the TID provided from user space and attach to
  * it after doing proper sanity checks.
  */
-static int attach_to_pi_owner(u32 uval, union futex_key *key,
+static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
 			      struct futex_pi_state **ps)
 {
 	pid_t pid = uval & FUTEX_TID_MASK;
@@ -1162,12 +1216,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
 	/*
 	 * We are the first waiter - try to look up the real owner and attach
 	 * the new pi_state to it, but bail out when TID = 0 [1]
+	 *
+	 * The !pid check is paranoid. None of the call sites should end up
+	 * with pid == 0, but better safe than sorry. Let the caller retry
 	 */
 	if (!pid)
-		return -ESRCH;
+		return -EAGAIN;
 	p = find_get_task_by_vpid(pid);
 	if (!p)
-		return -ESRCH;
+		return handle_exit_race(uaddr, uval, NULL);
 
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		put_task_struct(p);
@@ -1187,7 +1244,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
 		 * set, we know that the task has finished the
 		 * cleanup:
 		 */
-		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
+		int ret = handle_exit_race(uaddr, uval, p);
 
 		raw_spin_unlock_irq(&p->pi_lock);
 		put_task_struct(p);
@@ -1244,7 +1301,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
 	 * We are the first waiter - try to look up the owner based on
 	 * @uval and attach to it.
 	 */
-	return attach_to_pi_owner(uval, key, ps);
+	return attach_to_pi_owner(uaddr, uval, key, ps);
 }
 
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
@@ -1352,7 +1409,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
 	 * attach to the owner. If that fails, no harm done, we only
 	 * set the FUTEX_WAITERS bit in the user space variable.
 	 */
-	return attach_to_pi_owner(uval, key, ps);
+	return attach_to_pi_owner(uaddr, newval, key, ps);
 }
 
 /**
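
handle_exit_race() is one instance of a general lock-free rule: before reporting a hard failure, re-read the shared word; if it moved under you, the failure may be stale, so ask the caller to retry. Reduced to its skeleton in user-space C with hypothetical names:

#include <errno.h>
#include <stdatomic.h>

/* Returns -EAGAIN when 'word' changed since we sampled 'seen': the
 * owner is mid-exit and will rewrite it, so the whole operation should
 * be retried. Only a stable value makes -ESRCH ("really gone") final. */
static int classify_lookup_failure(_Atomic unsigned *word, unsigned seen)
{
	unsigned now = atomic_load(word);

	if (now != seen)
		return -EAGAIN;	/* state changed: transient, retry */

	return -ESRCH;		/* stable and still bogus: give up */
}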

--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c

@@ -289,9 +289,6 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
 
-	if (!timr->it_interval)
-		return;
-
 	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
 					    timr->it_interval);
 	hrtimer_restart(timer);
@@ -317,7 +314,7 @@ void posixtimer_rearm(struct kernel_siginfo *info)
 	if (!timr)
 		return;
 
-	if (timr->it_requeue_pending == info->si_sys_private) {
+	if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
 		timr->kclock->timer_rearm(timr);
 
 		timr->it_active = 1;

--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c

@@ -94,6 +94,9 @@ int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
 	}
 
 	while (len) {
+		if (sk_msg_full(dst))
+			return -ENOSPC;
+
 		sge_len = sge->length - off;
 		sge_off = sge->offset + off;
 		if (sge_len > len)

--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c

@@ -998,7 +998,9 @@ next_chunk:
 				if (!inet_diag_bc_sk(bc, sk))
 					goto next_normal;
 
-				sock_hold(sk);
+				if (!refcount_inc_not_zero(&sk->sk_refcnt))
+					goto next_normal;
+
 				num_arr[accum] = num;
 				sk_arr[accum] = sk;
 				if (++accum == SKARR_SZ)
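
sock_hold() bumps the refcount unconditionally, which is unsafe for a socket found in a lockless walk whose count may already have dropped to zero; refcount_inc_not_zero() only takes the reference if the object is still live. The canonical RCU lookup shape, sketched with a hypothetical hash_lookup() and obj (refcount_inc_not_zero() is the real helper):

rcu_read_lock();
obj = hash_lookup(key);		/* may be concurrently freed */
if (obj && !refcount_inc_not_zero(&obj->refcnt))
	obj = NULL;		/* lost the race: object already dying */
rcu_read_unlock();

/* obj, if non-NULL, is now pinned until we drop our reference. */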

--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c

@@ -901,6 +901,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
 			goto drop;
 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
+		ipv6h = ipv6_hdr(skb);
 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
 			goto drop;
 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))

--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c

@@ -318,6 +318,7 @@ static int vti6_rcv(struct sk_buff *skb)
 			return 0;
 		}
 
+		ipv6h = ipv6_hdr(skb);
 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
 			t->dev->stats.rx_dropped++;
 			rcu_read_unlock();

--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c

@@ -384,6 +384,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
 			kfree_skb_partial(fp, headstolen);
 		} else {
+			fp->sk = NULL;
 			if (!skb_shinfo(head)->frag_list)
 				skb_shinfo(head)->frag_list = fp;
 			head->data_len += fp->len;

--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c

@@ -2627,6 +2627,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 		proto	= saddr->sll_protocol;
 		addr	= saddr->sll_addr;
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+		if (addr && dev && saddr->sll_halen < dev->addr_len)
+			goto out;
 	}
 
 	err = -ENXIO;
@@ -2825,6 +2827,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		proto	= saddr->sll_protocol;
 		addr	= saddr->sll_addr;
 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+		if (addr && dev && saddr->sll_halen < dev->addr_len)
+			goto out;
 	}
 
 	err = -ENXIO;
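
The added check rejects a user-supplied link-layer address shorter than what the device will later consume (dev->addr_len), which would otherwise read past the end of the sockaddr buffer. The general rule when a length and a buffer arrive from different places is to validate before first use; a fragment sketch with hypothetical names:

/* addr points at user-supplied bytes, user_len says how many are valid;
 * the device will consume exactly dev_addr_len of them. */
if (addr && user_len < dev_addr_len)
	return -EINVAL;	/* too short: copying would overrun the buffer */

memcpy(hw_header, addr, dev_addr_len);	/* now provably in bounds */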

--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c

@@ -943,10 +943,12 @@ fallback_to_reg_send:
 					 tls_ctx->tx.overhead_size);
 		}
 
-		ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_pl,
-					       try_to_copy);
-		if (ret < 0)
-			goto trim_sgl;
+		if (try_to_copy) {
+			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
+						       msg_pl, try_to_copy);
+			if (ret < 0)
+				goto trim_sgl;
+		}
 
 		/* Open records defined only if successfully copied, otherwise
 		 * we would trim the sg but not reset the open record frags.
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include

@@ -115,9 +115,7 @@ __cc-option = $(call try-run,\
 
 # Do not attempt to build with gcc plugins during cc-option tests.
 # (And this uses delayed resolution so the flags will be up to date.)
-# In addition, do not include the asm macros which are built later.
-CC_OPTION_FILTERED = $(GCC_PLUGINS_CFLAGS) $(ASM_MACRO_FLAGS)
-CC_OPTION_CFLAGS = $(filter-out $(CC_OPTION_FILTERED),$(KBUILD_CFLAGS))
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)

--- a/scripts/mod/Makefile
+++ b/scripts/mod/Makefile

@@ -4,8 +4,6 @@ OBJECT_FILES_NON_STANDARD := y
 hostprogs-y	:= modpost mk_elfconfig
 always		:= $(hostprogs-y) empty.o
 
-CFLAGS_REMOVE_empty.o := $(ASM_MACRO_FLAGS)
-
 modpost-objs	:= modpost.o file2alias.o sumversion.o
 devicetable-offsets-file := devicetable-offsets.h