Merge linux 6.6.79 into TK5 release branch

Conflicts:
	Makefile

Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
Jianping Liu 2025-02-24 17:23:23 +08:00
commit 69a81d40fe
151 changed files with 2107 additions and 845 deletions


@ -174,22 +174,28 @@ HWCAP2_DCPODP
Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
HWCAP2_SVE2
Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.SVEver == 0b0001.
HWCAP2_SVEAES
Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.AES == 0b0001.
HWCAP2_SVEPMULL
Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.AES == 0b0010.
HWCAP2_SVEBITPERM
Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.BitPerm == 0b0001.
HWCAP2_SVESHA3
Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.SHA3 == 0b0001.
HWCAP2_SVESM4
Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.SM4 == 0b0001.
HWCAP2_FLAGM2
Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0010.
@ -198,16 +204,20 @@ HWCAP2_FRINT
Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
HWCAP2_SVEI8MM
Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.I8MM == 0b0001.
HWCAP2_SVEF32MM
Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.F32MM == 0b0001.
HWCAP2_SVEF64MM
Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.F64MM == 0b0001.
HWCAP2_SVEBF16
Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.BF16 == 0b0001.
HWCAP2_I8MM
Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001.
@ -273,7 +283,8 @@ HWCAP2_EBF16
Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
HWCAP2_SVE_EBF16
Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0010.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.BF16 == 0b0010.
HWCAP2_CSSC
Functionality implied by ID_AA64ISAR2_EL1.CSSC == 0b0001.
@ -282,7 +293,8 @@ HWCAP2_RPRFM
Functionality implied by ID_AA64ISAR2_EL1.RPRFM == 0b0001.
HWCAP2_SVE2P1
Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0010.
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
ID_AA64ZFR0_EL1.SVEver == 0b0010.
HWCAP2_SME2
Functionality implied by ID_AA64SMFR0_EL1.SMEver == 0b0001.
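
The entries above document bits that userspace reads from the auxiliary vector. A minimal sketch of probing them before using SVE2 (assuming an arm64 Linux system where <asm/hwcap.h> provides the HWCAP2_* constants):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* Per the change above, HWCAP2_SVE2 is only reported when SVE
	 * itself (ID_AA64PFR0_EL1.SVE == 0b0001) is implemented. */
	if (hwcap2 & HWCAP2_SVE2)
		printf("SVE2 supported\n");
	if (hwcap2 & HWCAP2_SVEBF16)
		printf("SVE BF16 supported\n");
	return 0;
}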


@ -22,7 +22,7 @@ description:
Each sub-node is identified using the node's name, with valid values listed
for each of the pmics below.
For mp5496, s1, s2
For mp5496, s1, s2, l2, l5
For pm2250, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22


@ -8,7 +8,7 @@ else
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 78
SUBLEVEL = 79
EXTRAVERSION =
NAME = Pinguïn Aangedreven
@ -1062,8 +1062,8 @@ LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
endif
# Align the bit size of userspace programs with the kernel
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
@ -1362,18 +1362,13 @@ $(tools-clean-targets):
$(Q)$(MAKE) -sC tools $@_clean
tools_clean: $(tools-clean-targets)
# Clear a bunch of variables before executing the submake
ifeq ($(quiet),silent_)
tools_silent=s
endif
tools/: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
# ---------------------------------------------------------------------------
# Kernel selftest


@ -42,6 +42,8 @@ struct pt_regs {
unsigned long trap_a0;
unsigned long trap_a1;
unsigned long trap_a2;
/* This makes the stack 16-byte aligned as GCC expects */
unsigned long __pad0;
/* These are saved by PAL-code: */
unsigned long ps;
unsigned long pc;


@ -34,7 +34,9 @@ void foo(void)
DEFINE(CRED_EGID, offsetof(struct cred, egid));
BLANK();
DEFINE(SP_OFF, offsetof(struct pt_regs, ps));
DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
DEFINE(PT_PTRACED, PT_PTRACED);
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);


@ -15,10 +15,6 @@
.set noat
.cfi_sections .debug_frame
/* Stack offsets. */
#define SP_OFF 184
#define SWITCH_STACK_SIZE 64
.macro CFI_START_OSF_FRAME func
.align 4
.globl \func
@ -198,8 +194,8 @@ CFI_END_OSF_FRAME entArith
CFI_START_OSF_FRAME entMM
SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them. */
subq $sp, 56, $sp
.cfi_adjust_cfa_offset 56
subq $sp, 64, $sp
.cfi_adjust_cfa_offset 64
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@ -214,7 +210,7 @@ CFI_START_OSF_FRAME entMM
.cfi_rel_offset $13, 32
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
addq $sp, 56, $19
addq $sp, 64, $19
/* handle the fault */
lda $8, 0x3fff
bic $sp, $8, $8
@ -227,7 +223,7 @@ CFI_START_OSF_FRAME entMM
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
addq $sp, 56, $sp
addq $sp, 64, $sp
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
@ -235,7 +231,7 @@ CFI_START_OSF_FRAME entMM
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -56
.cfi_adjust_cfa_offset -64
/* finish up the syscall as normal. */
br ret_from_sys_call
CFI_END_OSF_FRAME entMM
@ -382,8 +378,8 @@ entUnaUser:
.cfi_restore $0
.cfi_adjust_cfa_offset -256
SAVE_ALL /* setup normal kernel stack */
lda $sp, -56($sp)
.cfi_adjust_cfa_offset 56
lda $sp, -64($sp)
.cfi_adjust_cfa_offset 64
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@ -399,7 +395,7 @@ entUnaUser:
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
lda $8, 0x3fff
addq $sp, 56, $19
addq $sp, 64, $19
bic $sp, $8, $8
jsr $26, do_entUnaUser
ldq $9, 0($sp)
@ -409,7 +405,7 @@ entUnaUser:
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
lda $sp, 56($sp)
lda $sp, 64($sp)
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
@ -417,7 +413,7 @@ entUnaUser:
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -56
.cfi_adjust_cfa_offset -64
br ret_from_sys_call
CFI_END_OSF_FRAME entUna


@ -707,7 +707,7 @@ s_reg_to_mem (unsigned long s_reg)
static int unauser_reg_offsets[32] = {
R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
/* r9 ... r15 are stored in front of regs. */
-56, -48, -40, -32, -24, -16, -8,
-64, -56, -48, -40, -32, -24, -16, /* padding at -8 */
R(r16), R(r17), R(r18),
R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
R(r27), R(r28), R(gp),


@ -78,8 +78,8 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
(r) <= 18 ? (r)+10 : (r)-10])
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-17 : \
(r) <= 18 ? (r)+11 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,


@ -101,16 +101,18 @@ int populate_cache_leaves(unsigned int cpu)
unsigned int level, idx;
enum cache_type type;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
struct cacheinfo *infos = this_cpu_ci->info_list;
for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
idx < this_cpu_ci->num_leaves; level++) {
type = get_cache_type(level);
if (type == CACHE_TYPE_SEPARATE) {
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
if (idx + 1 >= this_cpu_ci->num_leaves)
break;
ci_leaf_init(&infos[idx++], CACHE_TYPE_DATA, level);
ci_leaf_init(&infos[idx++], CACHE_TYPE_INST, level);
} else {
ci_leaf_init(this_leaf++, type, level);
ci_leaf_init(&infos[idx++], type, level);
}
}
return 0;


@ -2762,6 +2762,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = match, \
}
#define HWCAP_CAP_MATCH_ID(match, reg, field, min_value, cap_type, cap) \
{ \
__HWCAP_CAP(#cap, cap_type, cap) \
HWCAP_CPUID_MATCH(reg, field, min_value) \
.matches = match, \
}
#ifdef CONFIG_ARM64_PTR_AUTH
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
{
@ -2790,6 +2797,13 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
};
#endif
#ifdef CONFIG_ARM64_SVE
static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
{
return system_supports_sve() && has_user_cpuid_feature(cap, scope);
}
#endif
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
@ -2827,18 +2841,18 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
HWCAP_CAP(ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
HWCAP_CAP(ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
HWCAP_CAP(ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
HWCAP_CAP(ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
#endif
HWCAP_CAP(ID_AA64PFR1_EL1, SSBS, SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_BTI


@ -38,6 +38,7 @@ SECTIONS
*/
/DISCARD/ : {
*(.note.GNU-stack .note.gnu.property)
*(.ARM.attributes)
}
.note : { *(.note.*) } :text :note


@ -162,6 +162,7 @@ SECTIONS
/DISCARD/ : {
*(.interp .dynamic)
*(.dynsym .dynstr .hash .gnu.hash)
*(.ARM.attributes)
}
. = KIMAGE_VADDR;


@ -18,16 +18,19 @@
.align 5
SYM_FUNC_START(__arch_cpu_idle)
/* start of rollback region */
LONG_L t0, tp, TI_FLAGS
nop
andi t0, t0, _TIF_NEED_RESCHED
bnez t0, 1f
nop
nop
nop
/* start of idle interrupt region */
ori t0, zero, CSR_CRMD_IE
/* idle instruction needs irq enabled */
csrxchg t0, t0, LOONGARCH_CSR_CRMD
/*
* If an interrupt lands here; between enabling interrupts above and
* going idle on the next instruction, we must *NOT* go idle since the
interrupt could have set TIF_NEED_RESCHED or caused a timer to need
* reprogramming. Fall through -- see handle_vint() below -- and have
* the idle loop take care of things.
*/
idle 0
/* end of rollback region */
/* end of idle interrupt region */
1: jr ra
SYM_FUNC_END(__arch_cpu_idle)
@ -35,11 +38,10 @@ SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
la_abs t1, __arch_cpu_idle
la_abs t1, 1b
LONG_L t0, sp, PT_ERA
/* 32 byte rollback region */
ori t0, t0, 0x1f
xori t0, t0, 0x1f
/* 3 instructions idle interrupt region */
ori t0, t0, 0b1100
bne t0, t1, 1f
LONG_S t0, sp, PT_ERA
1: move a0, sp


@ -11,7 +11,6 @@
void __cpuidle arch_cpu_idle(void)
{
raw_local_irq_enable();
__arch_cpu_idle(); /* idle instruction needs irq enabled */
__arch_cpu_idle();
raw_local_irq_disable();
}


@ -33,7 +33,7 @@ void machine_halt(void)
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}
@ -54,7 +54,7 @@ void machine_power_off(void)
#endif
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}
@ -75,6 +75,6 @@ void machine_restart(char *command)
acpi_reboot();
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}


@ -25,7 +25,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
const u64 *ptr;
u64 data, sum64 = 0;
if (unlikely(len == 0))
if (unlikely(len <= 0))
return 0;
offset = (unsigned long)buff & 7;
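
The widened check matters because len is a signed int: a zero-only test would let a negative length fall through into the summing loop. A toy illustration with a hypothetical caller (not kernel code):

#include <stdio.h>

static unsigned int toy_csum(const unsigned char *buf, int len)
{
	unsigned int sum = 0;

	if (len <= 0)	/* as in the fix: rejects negative lengths too */
		return 0;
	while (len-- > 0)
		sum += *buf++;
	return sum;
}

int main(void)
{
	unsigned char data[4] = { 1, 2, 3, 4 };

	printf("%u\n", toy_csum(data, 4));	/* 10 */
	printf("%u\n", toy_csum(data, -1));	/* 0 rather than a wild read */
	return 0;
}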


@ -4845,8 +4845,11 @@ static void intel_pmu_cpu_starting(int cpu)
init_debug_store_on_cpu(cpu);
/*
* Deal with CPUs that don't clear their LBRs on power-up.
* Deal with CPUs that don't clear their LBRs on power-up, and that may
* even boot with LBRs enabled.
*/
if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
intel_pmu_lbr_reset();
cpuc->lbr_sel = NULL;


@ -40,6 +40,8 @@ typedef struct {
*/
atomic64_t tlb_gen;
unsigned long next_trim_cpumask;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct rw_semaphore ldt_usr_sem;
struct ldt_struct *ldt;


@ -145,6 +145,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
atomic64_set(&mm->context.tlb_gen, 0);
mm->context.next_trim_cpumask = jiffies + HZ;
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {


@ -376,7 +376,8 @@
#define MSR_IA32_PASID_VALID BIT_ULL(31)
/* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
#define DEBUGCTLMSR_LBR_BIT 0 /* last branch recording */
#define DEBUGCTLMSR_LBR (1UL << DEBUGCTLMSR_LBR_BIT)
#define DEBUGCTLMSR_BTF_SHIFT 1
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
#define DEBUGCTLMSR_BUS_LOCK_DETECT (1UL << 2)


@ -222,6 +222,7 @@ struct flush_tlb_info {
unsigned int initiating_cpu;
u8 stride_shift;
u8 freed_tables;
u8 trim_cpumask;
};
void flush_tlb_local(void);


@ -8,6 +8,7 @@
#include <linux/timex.h>
#include <linux/i8253.h>
#include <asm/hypervisor.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <asm/time.h>
@ -39,9 +40,15 @@ static bool __init use_pit(void)
bool __init pit_timer_init(void)
{
if (!use_pit())
if (!use_pit()) {
/*
* Don't just ignore the PIT. Ensure it's stopped, because
* VMMs otherwise steal CPU time just to pointlessly waggle
* the (masked) IRQ.
*/
clockevent_i8253_disable();
return false;
}
clockevent_i8253_init(true);
global_clock_event = &i8253_clockevent;
return true;


@ -175,7 +175,6 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
noinstr void __static_call_update_early(void *tramp, void *func)
{
BUG_ON(system_state != SYSTEM_BOOTING);
BUG_ON(!early_boot_irqs_disabled);
BUG_ON(static_call_initialized);
__text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
sync_core();


@ -2175,6 +2175,9 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
u32 vector;
bool all_cpus;
if (!lapic_in_kernel(vcpu))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
if (hc->code == HVCALL_SEND_IPI) {
if (!hc->fast) {
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
@ -2801,7 +2804,8 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
if (!vcpu || lapic_in_kernel(vcpu))
ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
if (evmcs_ver)
ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;


@ -5289,7 +5289,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
union kvm_mmu_page_role root_role;
/* NPT requires CR0.PG=1. */
WARN_ON_ONCE(cpu_role.base.direct);
WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
root_role = cpu_role.base;
root_role.level = kvm_mmu_get_tdp_level(vcpu);


@ -644,6 +644,11 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
u32 pause_count12;
u32 pause_thresh12;
nested_svm_transition_tlb_flush(vcpu);
/* Enter Guest-Mode */
enter_guest_mode(vcpu);
/*
* Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
* exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
@ -760,11 +765,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
}
}
nested_svm_transition_tlb_flush(vcpu);
/* Enter Guest-Mode */
enter_guest_mode(vcpu);
/*
* Merge guest and host intercepts - must be called with vcpu in
* guest-mode to take effect.


@ -898,9 +898,36 @@ done:
nr_invalidate);
}
static bool tlb_is_not_lazy(int cpu, void *data)
static bool should_flush_tlb(int cpu, void *data)
{
return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
struct flush_tlb_info *info = data;
/* Lazy TLB will get flushed at the next context switch. */
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
return false;
/* No mm means kernel memory flush. */
if (!info->mm)
return true;
/* The target mm is loaded, and the CPU is not lazy. */
if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
return true;
/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
if (info->trim_cpumask)
return true;
return false;
}
static bool should_trim_cpumask(struct mm_struct *mm)
{
if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
return true;
}
return false;
}
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
@ -934,7 +961,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
if (info->freed_tables)
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
else
on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
(void *)info, 1, cpumask);
}
@ -985,6 +1012,7 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
info->freed_tables = freed_tables;
info->new_tlb_gen = new_tlb_gen;
info->initiating_cpu = smp_processor_id();
info->trim_cpumask = 0;
return info;
}
@ -1027,6 +1055,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
* flush_tlb_func_local() directly in this case.
*/
if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
info->trim_cpumask = should_trim_cpumask(mm);
flush_tlb_multi(mm_cpumask(mm), info);
} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
lockdep_assert_irqs_enabled();
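
should_trim_cpumask() rate-limits the trimming: at most once per HZ jiffies, CPUs that remain in mm_cpumask() without running the mm get flushed and removed. A userspace analogue of that once-per-interval gate (a sketch; the kernel version uses READ_ONCE/WRITE_ONCE because many CPUs race on the timestamp):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static time_t next_trim;

static bool should_trim(void)
{
	time_t now = time(NULL);

	if (now >= next_trim) {		/* kernel: time_after(jiffies, ...) */
		next_trim = now + 1;	/* kernel: jiffies + HZ */
		return true;
	}
	return false;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("call %d: trim=%d\n", i, should_trim());
	return 0;
}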


@ -113,6 +113,51 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
*/
static DEFINE_SPINLOCK(xen_reservation_lock);
/* Protected by xen_reservation_lock. */
#define MIN_CONTIG_ORDER 9 /* 2MB */
static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
static unsigned long *discontig_frames __refdata = discontig_frames_early;
static bool discontig_frames_dyn;
static int alloc_discontig_frames(unsigned int order)
{
unsigned long *new_array, *old_array;
unsigned int old_order;
unsigned long flags;
BUG_ON(order < MIN_CONTIG_ORDER);
BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);
new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
order - MIN_CONTIG_ORDER);
if (!new_array)
return -ENOMEM;
spin_lock_irqsave(&xen_reservation_lock, flags);
old_order = discontig_frames_order;
if (order > discontig_frames_order || !discontig_frames_dyn) {
if (!discontig_frames_dyn)
old_array = NULL;
else
old_array = discontig_frames;
discontig_frames = new_array;
discontig_frames_order = order;
discontig_frames_dyn = true;
} else {
old_array = new_array;
}
spin_unlock_irqrestore(&xen_reservation_lock, flags);
free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);
return 0;
}
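
The page-order arithmetic works because each frame number is an unsigned long, so one 4 KiB page holds exactly 1 << MIN_CONTIG_ORDER of them; that is the invariant the BUILD_BUG_ON() pins down. A compile-time sketch of the same math (constants assumed for x86-64):

#include <assert.h>

#define PAGE_SIZE		4096UL
#define MIN_CONTIG_ORDER	9	/* 2MB */

static_assert((1UL << MIN_CONTIG_ORDER) * sizeof(unsigned long) == PAGE_SIZE,
	      "one page holds exactly 2^MIN_CONTIG_ORDER frame numbers");

/* An array for 2^order frames needs 2^order * 8 bytes, i.e.
 * 2^(order - MIN_CONTIG_ORDER) pages -- hence the
 * __get_free_pages(GFP_KERNEL, order - MIN_CONTIG_ORDER) call above. */

int main(void) { return 0; }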
/*
* Note about cr3 (pagetable base) values:
*
@ -782,6 +827,7 @@ void xen_mm_pin_all(void)
{
struct page *page;
spin_lock(&init_mm.page_table_lock);
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
@ -792,6 +838,7 @@ void xen_mm_pin_all(void)
}
spin_unlock(&pgd_lock);
spin_unlock(&init_mm.page_table_lock);
}
static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
@ -813,6 +860,9 @@ static void __init xen_after_bootmem(void)
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
if (alloc_discontig_frames(MIN_CONTIG_ORDER))
BUG();
}
static void xen_unpin_page(struct mm_struct *mm, struct page *page,
@ -888,6 +938,7 @@ void xen_mm_unpin_all(void)
{
struct page *page;
spin_lock(&init_mm.page_table_lock);
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
@ -899,6 +950,7 @@ void xen_mm_unpin_all(void)
}
spin_unlock(&pgd_lock);
spin_unlock(&init_mm.page_table_lock);
}
static void xen_enter_mmap(struct mm_struct *mm)
@ -2199,10 +2251,6 @@ void __init xen_init_mmu_ops(void)
memset(dummy_mapping, 0xff, PAGE_SIZE);
}
/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
unsigned long *in_frames,
@ -2319,18 +2367,25 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
{
unsigned long *in_frames = discontig_frames, out_frame;
unsigned long *in_frames, out_frame;
unsigned long flags;
int success;
unsigned long vstart = (unsigned long)phys_to_virt(pstart);
if (unlikely(order > MAX_CONTIG_ORDER))
return -ENOMEM;
if (unlikely(order > discontig_frames_order)) {
if (!discontig_frames_dyn)
return -ENOMEM;
if (alloc_discontig_frames(order))
return -ENOMEM;
}
memset((void *) vstart, 0, PAGE_SIZE << order);
spin_lock_irqsave(&xen_reservation_lock, flags);
in_frames = discontig_frames;
/* 1. Zap current PTEs, remembering MFNs. */
xen_zap_pfn_range(vstart, order, in_frames, NULL);
@ -2354,12 +2409,12 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
unsigned long *out_frames = discontig_frames, in_frame;
unsigned long *out_frames, in_frame;
unsigned long flags;
int success;
unsigned long vstart;
if (unlikely(order > MAX_CONTIG_ORDER))
if (unlikely(order > discontig_frames_order))
return;
vstart = (unsigned long)phys_to_virt(pstart);
@ -2367,6 +2422,8 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
spin_lock_irqsave(&xen_reservation_lock, flags);
out_frames = discontig_frames;
/* 1. Find start MFN of contiguous extent. */
in_frame = virt_to_mfn((void *)vstart);


@ -51,13 +51,25 @@ int mac_partition(struct parsed_partitions *state)
}
secsize = be16_to_cpu(md->block_size);
put_dev_sector(sect);
/*
* If the "block size" is not a power of 2, things get weird - we might
* end up with a partition straddling a sector boundary, so we wouldn't
* be able to read a partition entry with read_part_sector().
* Real block sizes are probably (?) powers of two, so just require
* that.
*/
if (!is_power_of_2(secsize))
return -1;
datasize = round_down(secsize, 512);
data = read_part_sector(state, datasize / 512, &sect);
if (!data)
return -1;
partoffset = secsize % 512;
if (partoffset + sizeof(*part) > datasize)
if (partoffset + sizeof(*part) > datasize) {
put_dev_sector(sect);
return -1;
}
part = (struct mac_partition *) (data + partoffset);
if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
put_dev_sector(sect);
@ -110,8 +122,8 @@ int mac_partition(struct parsed_partitions *state)
int i, l;
goodness++;
l = strlen(part->name);
if (strcmp(part->name, "/") == 0)
l = strnlen(part->name, sizeof(part->name));
if (strncmp(part->name, "/", sizeof(part->name)) == 0)
goodness++;
for (i = 0; i <= l - 4; ++i) {
if (strncasecmp(part->name + i, "root",
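
A worked pass through the sector math above, with numbers assumed for illustration:

/*
 * secsize = 2048 (a power of two, accepted):
 *   datasize    = round_down(2048, 512) = 2048
 *   sector read = datasize / 512        = 4    (bytes 2048..2559)
 *   partoffset  = 2048 % 512            = 0
 *   -> partition entry #1 lies wholly inside the sector just read.
 *
 * secsize = 520 (not a power of two, now rejected):
 *   entry k starts at byte k * 520, so its offset within a 512-byte
 *   sector drifts by 8 bytes per entry; eventually an entry straddles
 *   a sector boundary, which read_part_sector() cannot return whole.
 *
 * The new partoffset + sizeof(*part) > datasize test also covers
 * secsize < 512, where round_down() makes datasize == 0.
 */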


@ -400,6 +400,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
/* Vexia Edu Atla 10 tablet 5V version */
.matches = {
/* Having all 3 of these not set is somewhat unique */
DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
/* Above strings are too generic, also match on BIOS date */
DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
/* Vexia Edu Atla 10 tablet 9V version */
.matches = {


@ -894,6 +894,7 @@ err_alloc:
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->main_status_buf);
kfree(d->status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
@ -969,6 +970,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
if (d->config_buf) {


@ -108,11 +108,8 @@ int __init clocksource_i8253_init(void)
#endif
#ifdef CONFIG_CLKEVT_I8253
static int pit_shutdown(struct clock_event_device *evt)
void clockevent_i8253_disable(void)
{
if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
return 0;
raw_spin_lock(&i8253_lock);
outb_p(0x30, PIT_MODE);
@ -123,6 +120,14 @@ static int pit_shutdown(struct clock_event_device *evt)
}
raw_spin_unlock(&i8253_lock);
}
static int pit_shutdown(struct clock_event_device *evt)
{
if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
return 0;
clockevent_i8253_disable();
return 0;
}


@ -908,13 +908,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
EFI_MEMORY_RUNTIME))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
"|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
"|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
attr & EFI_MEMORY_SP ? "SP" : "",


@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
return 0;
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
return 0;


@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE)
continue;
if (efi_soft_reserve_enabled() &&
(desc->attribute & EFI_MEMORY_SP))
continue;


@ -69,6 +69,22 @@ struct bcm_kona_gpio {
struct bcm_kona_gpio_bank {
int id;
int irq;
/*
* Used to keep track of lock/unlock operations for each GPIO in the
* bank.
*
* All GPIOs are locked by default (see bcm_kona_gpio_reset), and the
* unlock count for all GPIOs is 0 by default. Each unlock increments
* the counter, and each lock decrements the counter.
*
* The lock function only locks the GPIO once its unlock counter is
* down to 0. This is necessary because the GPIO is unlocked in two
* places in this driver: once for requested GPIOs, and once for
* requested IRQs. Since it is possible for a GPIO to be requested
* as both a GPIO and an IRQ, we need to ensure that we don't lock it
* too early.
*/
u8 gpio_unlock_count[GPIO_PER_BANK];
/* Used in the interrupt handler */
struct bcm_kona_gpio *kona_gpio;
};
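
A minimal standalone model of the unlock-count scheme described in the comment above (a sketch, not the driver code):

#include <stdio.h>

static unsigned int unlock_count;	/* per GPIO in the driver */

static void gpio_unlock(void)
{
	if (unlock_count == 0)
		printf("hw: clear lock bit\n");	/* first unlock hits hw */
	++unlock_count;
}

static void gpio_lock(void)
{
	if (unlock_count == 0) {
		fprintf(stderr, "unbalanced lock\n");
		return;
	}
	if (--unlock_count == 0)
		printf("hw: set lock bit\n");	/* last lock hits hw */
}

int main(void)
{
	gpio_unlock();	/* requested as a GPIO */
	gpio_unlock();	/* requested as an IRQ */
	gpio_lock();	/* IRQ released: stays unlocked */
	gpio_lock();	/* GPIO released: locked again */
	return 0;
}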
@ -86,14 +102,24 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
u32 val;
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
if (bank->gpio_unlock_count[bit] == 0) {
dev_err(kona_gpio->gpio_chip.parent,
"Unbalanced locks for GPIO %u\n", gpio);
return;
}
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val |= BIT(gpio);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
if (--bank->gpio_unlock_count[bit] == 0) {
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val |= BIT(bit);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
}
static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
@ -102,14 +128,20 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
u32 val;
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
if (bank->gpio_unlock_count[bit] == 0) {
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val &= ~BIT(gpio);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val &= ~BIT(bit);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
++bank->gpio_unlock_count[bit];
}
static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
@ -360,6 +392,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
@ -382,6 +415,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
@ -477,15 +511,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
unsigned int gpio = d->hwirq;
return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
/*
* We need to unlock the GPIO before any other operations are performed
* on the relevant GPIO configuration registers
*/
bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio);
}
static void bcm_kona_gpio_irq_relres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
unsigned int gpio = d->hwirq;
gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
/* Once we no longer use it, lock the GPIO again */
bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio);
}
static struct irq_chip bcm_gpio_irq_chip = {
@ -614,7 +659,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
bank->irq = platform_get_irq(pdev, i);
bank->kona_gpio = kona_gpio;
if (bank->irq < 0) {
dev_err(dev, "Couldn't get IRQ for bank %d", i);
dev_err(dev, "Couldn't get IRQ for bank %d\n", i);
ret = -ENOENT;
goto err_irq_domain;
}


@ -191,7 +191,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
[REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
[REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
};
int i, j;
int ret, i, j;
/*
* STMPE1600: to be able to get IRQ from pins,
@ -199,8 +199,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
* GPSR or GPCR registers
*/
if (stmpe->partnum == STMPE1600) {
stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
if (ret < 0) {
dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret);
goto err;
}
ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
if (ret < 0) {
dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret);
goto err;
}
}
for (i = 0; i < CACHE_NR_REGS; i++) {
@ -222,6 +230,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
}
}
err:
mutex_unlock(&stmpe_gpio->irq_lock);
}


@ -1706,6 +1706,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "PNP0C50:00@8",
},
},
{
/*
* Spurious wakeups from GPIO 11
* Found in BIOS 1.04
* https://gitlab.freedesktop.org/drm/amd/-/issues/3954
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
},
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.ignore_interrupt = "AMDI0030:00@11",
},
},
{} /* Terminating entry */
};


@ -723,13 +723,13 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
}
if (gc->ngpio == 0) {
chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
dev_err(dev, "tried to insert a GPIO chip with zero lines\n");
return -EINVAL;
}
if (gc->ngpio > FASTPATH_NGPIO)
chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
gc->ngpio, FASTPATH_NGPIO);
dev_warn(dev, "line cnt %u is greater than fast path cnt %u\n",
gc->ngpio, FASTPATH_NGPIO);
return 0;
}


@ -3450,9 +3450,10 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
err = 0;
goto out;
} else {
dev_err(adev->dev, "fail to initialize cap microcode\n");
}
dev_err(adev->dev, "fail to initialize cap microcode\n");
goto out;
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];


@ -2026,6 +2026,7 @@ bool dcn20_fast_validate_bw(
{
bool out = false;
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@ -2050,7 +2051,7 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)


@ -1002,8 +1002,10 @@ static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer(
struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
if (!head_pipe)
if (!head_pipe) {
ASSERT(0);
return NULL;
}
if (!idle_pipe)
return NULL;


@ -800,6 +800,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
{
bool out = false;
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@ -842,7 +843,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
goto validate_fail;
}
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];


@ -517,7 +517,8 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
if (!smu_table->hardcode_pptable) {
if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
if (!smu_table->hardcode_pptable)
return -ENOMEM;


@ -168,7 +168,7 @@ static int igt_ppgtt_alloc(void *arg)
return PTR_ERR(ppgtt);
if (!ppgtt->vm.allocate_va_range)
goto err_ppgtt_cleanup;
goto ppgtt_vm_put;
/*
* While we only allocate the page tables here and so we could
@ -236,7 +236,7 @@ err_ppgtt_cleanup:
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
ppgtt_vm_put:
i915_vm_put(&ppgtt->vm);
return err;
}


@ -587,7 +587,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
for (timeout = 10; timeout > 0; --timeout) {
if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
(rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
(rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
(rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY))
break;
usleep_range(1000, 2000);


@ -142,7 +142,6 @@
#define CLOCKSET1 0x101c
#define CLOCKSET1_LOCK_PHY (1 << 17)
#define CLOCKSET1_LOCK (1 << 16)
#define CLOCKSET1_CLKSEL (1 << 8)
#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
#define CLOCKSET1_CLKINSEL_DIG (1 << 2)


@ -646,7 +646,7 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
{
dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
/* clear the irqstatus for newly enabled irqs */
/* clear the irqstatus for irqs that will be enabled */
dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
dispc_k2g_vp_set_irqenable(dispc, 0, mask);
@ -654,6 +654,9 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
/* clear the irqstatus for irqs that were disabled */
dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & old_mask);
/* flush posted write */
dispc_k2g_read_irqenable(dispc);
}
@ -726,24 +729,20 @@ static
void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
{
unsigned int i;
u32 top_clear = 0;
for (i = 0; i < dispc->feat->num_vps; ++i) {
if (clearmask & DSS_IRQ_VP_MASK(i)) {
if (clearmask & DSS_IRQ_VP_MASK(i))
dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
top_clear |= BIT(i);
}
}
for (i = 0; i < dispc->feat->num_planes; ++i) {
if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
if (clearmask & DSS_IRQ_PLANE_MASK(i))
dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
top_clear |= BIT(4 + i);
}
}
if (dispc->feat->subrev == DISPC_K2G)
return;
dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
/* always clear the top level irqstatus */
dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS));
/* Flush posted writes */
dispc_read(dispc, DISPC_IRQSTATUS);
@ -789,7 +788,7 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
old_mask = dispc_k3_read_irqenable(dispc);
/* clear the irqstatus for newly enabled irqs */
/* clear the irqstatus for irqs that will be enabled */
dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
for (i = 0; i < dispc->feat->num_vps; ++i) {
@ -814,6 +813,9 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
if (main_disable)
dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
/* clear the irqstatus for irqs that were disabled */
dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & old_mask);
/* Flush posted writes */
dispc_read(dispc, DISPC_IRQENABLE_SET);
}


@ -179,6 +179,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
{
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_perfmon_destroy *req = data;
struct v3d_dev *v3d = v3d_priv->v3d;
struct v3d_perfmon *perfmon;
mutex_lock(&v3d_priv->perfmon.lock);
@ -188,6 +189,10 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
if (!perfmon)
return -EINVAL;
/* If the active perfmon is being destroyed, stop it first */
if (perfmon == v3d->active_perfmon)
v3d_perfmon_stop(v3d, perfmon, false);
v3d_perfmon_put(perfmon);
return 0;


@ -1668,9 +1668,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
break;
}
if (suffix)
if (suffix) {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
if (!hi->input->name)
return -ENOMEM;
}
return 0;
}

File diff suppressed because it is too large Load Diff

View File

@ -171,7 +171,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
b_ep = ep->desc.bEndpointAddress;
/* Are the expected endpoints present? */
u8 ep_addr[1] = {b_ep};
u8 ep_addr[2] = {b_ep, 0};
if (!usb_check_int_endpoints(usbif, ep_addr)) {
hid_err(hdev, "Unexpected non-int endpoint\n");


@ -452,7 +452,6 @@ static void efa_ib_device_remove(struct efa_dev *dev)
ibdev_info(&dev->ibdev, "Unregister ib device\n");
ib_unregister_device(&dev->ibdev);
efa_destroy_eqs(dev);
efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
efa_release_doorbell_bar(dev);
}
@ -623,12 +622,14 @@ err_disable_device:
return ERR_PTR(err);
}
static void efa_remove_device(struct pci_dev *pdev)
static void efa_remove_device(struct pci_dev *pdev,
enum efa_regs_reset_reason_types reset_reason)
{
struct efa_dev *dev = pci_get_drvdata(pdev);
struct efa_com_dev *edev;
edev = &dev->edev;
efa_com_dev_reset(edev, reset_reason);
efa_com_admin_destroy(edev);
efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
@ -656,7 +657,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_remove_device:
efa_remove_device(pdev);
efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
return err;
}
@ -665,7 +666,7 @@ static void efa_remove(struct pci_dev *pdev)
struct efa_dev *dev = pci_get_drvdata(pdev);
efa_ib_device_remove(dev);
efa_remove_device(pdev);
efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
}
static struct pci_driver efa_pci_driver = {


@ -1465,22 +1465,12 @@ __acquires(bitmap->lock)
&(bitmap->bp[page].map[pageoff]);
}
int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors)
{
if (!bitmap)
return 0;
if (behind) {
int bw;
atomic_inc(&bitmap->behind_writes);
bw = atomic_read(&bitmap->behind_writes);
if (bw > bitmap->behind_writes_used)
bitmap->behind_writes_used = bw;
pr_debug("inc write-behind count %d/%lu\n",
bw, bitmap->mddev->bitmap_info.max_write_behind);
}
while (sectors) {
sector_t blocks;
bitmap_counter_t *bmc;
@ -1527,20 +1517,12 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s
}
return 0;
}
EXPORT_SYMBOL(md_bitmap_startwrite);
void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int success, int behind)
unsigned long sectors)
{
if (!bitmap)
return;
if (behind) {
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
pr_debug("dec write-behind count %d/%lu\n",
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
while (sectors) {
sector_t blocks;
@ -1554,15 +1536,16 @@ void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
return;
}
if (success && !bitmap->mddev->degraded &&
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
}
if (!success && !NEEDED(*bmc))
if (!bitmap->mddev->degraded) {
if (bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
sysfs_notify_dirent_safe(
bitmap->sysfs_can_clear);
}
} else if (!NEEDED(*bmc)) {
*bmc |= NEEDED_MASK;
}
if (COUNTER(*bmc) == COUNTER_MAX)
wake_up(&bitmap->overflow_wait);
@ -1580,7 +1563,6 @@ void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
sectors = 0;
}
}
EXPORT_SYMBOL(md_bitmap_endwrite);
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
int degraded)
@ -1842,6 +1824,39 @@ void md_bitmap_free(struct bitmap *bitmap)
}
EXPORT_SYMBOL(md_bitmap_free);
void md_bitmap_start_behind_write(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
int bw;
if (!bitmap)
return;
atomic_inc(&bitmap->behind_writes);
bw = atomic_read(&bitmap->behind_writes);
if (bw > bitmap->behind_writes_used)
bitmap->behind_writes_used = bw;
pr_debug("inc write-behind count %d/%lu\n",
bw, bitmap->mddev->bitmap_info.max_write_behind);
}
EXPORT_SYMBOL_GPL(md_bitmap_start_behind_write);
void md_bitmap_end_behind_write(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return;
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
pr_debug("dec write-behind count %d/%lu\n",
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
EXPORT_SYMBOL_GPL(md_bitmap_end_behind_write);
void md_bitmap_wait_behind_writes(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;


@ -253,9 +253,11 @@ void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long
/* these are exported */
int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int behind);
unsigned long sectors);
void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int success, int behind);
unsigned long sectors);
void md_bitmap_start_behind_write(struct mddev *mddev);
void md_bitmap_end_behind_write(struct mddev *mddev);
int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
void md_bitmap_close_sync(struct bitmap *bitmap);


@ -8730,12 +8730,32 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
static void md_bitmap_start(struct mddev *mddev,
struct md_io_clone *md_io_clone)
{
if (mddev->pers->bitmap_sector)
mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
&md_io_clone->sectors);
md_bitmap_startwrite(mddev->bitmap, md_io_clone->offset,
md_io_clone->sectors);
}
static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
{
md_bitmap_endwrite(mddev->bitmap, md_io_clone->offset,
md_io_clone->sectors);
}
static void md_end_clone_io(struct bio *bio)
{
struct md_io_clone *md_io_clone = bio->bi_private;
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
md_bitmap_end(mddev, md_io_clone);
if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
@ -8760,6 +8780,12 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
if (blk_queue_io_stat(bdev->bd_disk->queue))
md_io_clone->start_time = bio_start_io_acct(*bio);
if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
md_io_clone->offset = (*bio)->bi_iter.bi_sector;
md_io_clone->sectors = bio_sectors(*bio);
md_bitmap_start(mddev, md_io_clone);
}
clone->bi_end_io = md_end_clone_io;
clone->bi_private = md_io_clone;
*bio = clone;


@ -661,6 +661,9 @@ struct md_personality
void *(*takeover) (struct mddev *mddev);
/* Changes the consistency policy of an active array. */
int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
/* convert io ranges from array to bitmap */
void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
unsigned long *sectors);
};
struct md_sysfs_entry {
@ -743,6 +746,8 @@ struct md_io_clone {
struct mddev *mddev;
struct bio *orig_bio;
unsigned long start_time;
sector_t offset;
unsigned long sectors;
struct bio bio_clone;
};


@ -419,11 +419,8 @@ static void close_write(struct r1bio *r1_bio)
bio_put(r1_bio->behind_master_bio);
r1_bio->behind_master_bio = NULL;
}
/* clear the bitmap if all writes complete successfully */
md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
r1_bio->sectors,
!test_bit(R1BIO_Degraded, &r1_bio->state),
test_bit(R1BIO_BehindIO, &r1_bio->state));
if (test_bit(R1BIO_BehindIO, &r1_bio->state))
md_bitmap_end_behind_write(r1_bio->mddev);
md_write_end(r1_bio->mddev);
}
@ -480,8 +477,6 @@ static void raid1_end_write_request(struct bio *bio)
if (!test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_WriteError, &r1_bio->state);
else {
/* Fail the request */
set_bit(R1BIO_Degraded, &r1_bio->state);
/* Finished with this branch */
r1_bio->bios[mirror] = NULL;
to_put = bio;
@ -1414,11 +1409,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
break;
}
r1_bio->bios[i] = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks)
set_bit(R1BIO_Degraded, &r1_bio->state);
if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
}
atomic_inc(&rdev->nr_pending);
if (test_bit(WriteErrorSeen, &rdev->flags)) {
@ -1444,16 +1436,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
*/
max_sectors = bad_sectors;
rdev_dec_pending(rdev, mddev);
/* We don't set R1BIO_Degraded as that
* only applies if the disk is
* missing, so it might be re-added,
* and we want to know to recover this
* chunk.
* In this case the device is here,
* and the fact that this chunk is not
* in-sync is recorded in the bad
* block log
*/
continue;
}
if (is_bad) {
@ -1530,8 +1512,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
alloc_behind_master_bio(r1_bio, bio);
}
md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
test_bit(R1BIO_BehindIO, &r1_bio->state));
if (test_bit(R1BIO_BehindIO, &r1_bio->state))
md_bitmap_start_behind_write(mddev);
first_clone = 0;
}
@ -2476,12 +2458,9 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
* errors.
*/
fail = true;
if (!narrow_write_error(r1_bio, m)) {
if (!narrow_write_error(r1_bio, m))
md_error(conf->mddev,
conf->mirrors[m].rdev);
/* an I/O failed, we can't clear the bitmap */
set_bit(R1BIO_Degraded, &r1_bio->state);
}
rdev_dec_pending(conf->mirrors[m].rdev,
conf->mddev);
}
@ -2573,8 +2552,6 @@ static void raid1d(struct md_thread *thread)
list_del(&r1_bio->retry_list);
idx = sector_to_idx(r1_bio->sector);
atomic_dec(&conf->nr_queued[idx]);
if (mddev->degraded)
set_bit(R1BIO_Degraded, &r1_bio->state);
if (test_bit(R1BIO_WriteError, &r1_bio->state))
close_write(r1_bio);
raid_end_bio_io(r1_bio);


@ -187,7 +187,6 @@ struct r1bio {
enum r1bio_state {
R1BIO_Uptodate,
R1BIO_IsSync,
R1BIO_Degraded,
R1BIO_BehindIO,
/* Set ReadError on bios that experience a readerror so that
* raid1d knows what to do with them.


@ -427,11 +427,6 @@ static void raid10_end_read_request(struct bio *bio)
static void close_write(struct r10bio *r10_bio)
{
/* clear the bitmap if all writes complete successfully */
md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
r10_bio->sectors,
!test_bit(R10BIO_Degraded, &r10_bio->state),
0);
md_write_end(r10_bio->mddev);
}
@ -501,7 +496,6 @@ static void raid10_end_write_request(struct bio *bio)
set_bit(R10BIO_WriteError, &r10_bio->state);
else {
/* Fail the request */
set_bit(R10BIO_Degraded, &r10_bio->state);
r10_bio->devs[slot].bio = NULL;
to_put = bio;
dec_rdev = 1;
@ -1490,10 +1484,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->devs[i].bio = NULL;
r10_bio->devs[i].repl_bio = NULL;
if (!rdev && !rrdev) {
set_bit(R10BIO_Degraded, &r10_bio->state);
if (!rdev && !rrdev)
continue;
}
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
@ -1510,14 +1502,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
* to other devices yet
*/
max_sectors = bad_sectors;
/* We don't set R10BIO_Degraded as that
* only applies if the disk is missing,
* so it might be re-added, and we want to
* know to recover this chunk.
* In this case the device is here, and the
* fact that this chunk is not in-sync is
* recorded in the bad block log.
*/
continue;
}
if (is_bad) {
@ -1554,7 +1538,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
md_account_bio(mddev, &bio);
r10_bio->master_bio = bio;
atomic_set(&r10_bio->remaining, 1);
md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
for (i = 0; i < conf->copies; i++) {
if (r10_bio->devs[i].bio)
@ -3063,11 +3046,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_dec_pending(rdev, conf->mddev);
} else if (bio != NULL && bio->bi_status) {
fail = true;
if (!narrow_write_error(r10_bio, m)) {
if (!narrow_write_error(r10_bio, m))
md_error(conf->mddev, rdev);
set_bit(R10BIO_Degraded,
&r10_bio->state);
}
rdev_dec_pending(rdev, conf->mddev);
}
bio = r10_bio->devs[m].repl_bio;
@ -3126,8 +3106,6 @@ static void raid10d(struct md_thread *thread)
r10_bio = list_first_entry(&tmp, struct r10bio,
retry_list);
list_del(&r10_bio->retry_list);
if (mddev->degraded)
set_bit(R10BIO_Degraded, &r10_bio->state);
if (test_bit(R10BIO_WriteError,
&r10_bio->state))


@ -161,7 +161,6 @@ enum r10bio_state {
R10BIO_IsSync,
R10BIO_IsRecover,
R10BIO_IsReshape,
R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
* so that raid10d knows what to do with them.
*/


@ -313,10 +313,6 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
RAID5_STRIPE_SECTORS(conf),
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
}
}
}


@ -905,7 +905,6 @@ static bool stripe_can_batch(struct stripe_head *sh)
if (raid5_has_log(conf) || raid5_has_ppl(conf))
return false;
return test_bit(STRIPE_BATCH_READY, &sh->state) &&
!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
is_full_stripe_write(sh);
}
@ -1359,8 +1358,6 @@ again:
submit_bio_noacct(rbi);
}
if (!rdev && !rrdev) {
if (op_is_write(op))
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %d on disc %d for sector %llu\n",
bi->bi_opf, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
@ -2925,7 +2922,6 @@ static void raid5_end_write_request(struct bio *bi)
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (bi->bi_status) {
set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
@ -3590,29 +3586,9 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
(*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
sh->dev[dd_idx].sector);
if (conf->mddev->bitmap && firstwrite) {
/* Cannot hold spinlock over bitmap_startwrite,
* but must ensure this isn't added to a batch until
* we have added to the bitmap and set bm_seq.
* So set STRIPE_BITMAP_PENDING to prevent
* batching.
* If multiple __add_stripe_bio() calls race here they
* much all set STRIPE_BITMAP_PENDING. So only the first one
* to complete "bitmap_startwrite" gets to set
* STRIPE_BIT_DELAY. This is important as once a stripe
* is added to a batch, STRIPE_BIT_DELAY cannot be changed
* any more.
*/
set_bit(STRIPE_BITMAP_PENDING, &sh->state);
spin_unlock_irq(&sh->stripe_lock);
md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0);
spin_lock_irq(&sh->stripe_lock);
clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
if (!sh->batch_head) {
sh->bm_seq = conf->seq_flush+1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
sh->bm_seq = conf->seq_flush+1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
}
@ -3663,7 +3639,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
BUG_ON(sh->batch_head);
for (i = disks; i--; ) {
struct bio *bi;
int bitmap_end = 0;
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
struct md_rdev *rdev;
@ -3690,8 +3665,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].towrite = NULL;
sh->overwrite_disks = 0;
spin_unlock_irq(&sh->stripe_lock);
if (bi)
bitmap_end = 1;
log_stripe_write_finished(sh);
@ -3706,10 +3679,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bio_io_error(bi);
bi = nextbi;
}
if (bitmap_end)
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0, 0);
bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
@ -3718,7 +3687,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].page = sh->dev[i].orig_page;
}
if (bi) bitmap_end = 1;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
@ -3752,9 +3720,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = nextbi;
}
}
if (bitmap_end)
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0, 0);
/* If we were in the middle of a write the parity block might
* still be locked - so just clear all R5_LOCKED flags
*/
@ -4105,10 +4070,7 @@ returnbi:
bio_endio(wbi);
wbi = wbi2;
}
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
RAID5_STRIPE_SECTORS(conf),
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head,
@ -4385,7 +4347,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
s->locked++;
set_bit(R5_Wantwrite, &dev->flags);
clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
break;
case check_state_run:
@ -4542,7 +4503,6 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
clear_bit(R5_Wantwrite, &dev->flags);
s->locked--;
}
clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
break;
@ -4942,8 +4902,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_COMPUTE_RUN) |
(1 << STRIPE_DISCARD) |
(1 << STRIPE_BATCH_READY) |
(1 << STRIPE_BATCH_ERR) |
(1 << STRIPE_BITMAP_PENDING)),
(1 << STRIPE_BATCH_ERR)),
"stripe state: %lx\n", sh->state);
WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
(1 << STRIPE_REPLACED)),
@ -4951,7 +4910,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DEGRADED) |
(1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
@ -5848,13 +5806,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
spin_unlock_irq(&sh->stripe_lock);
if (conf->mddev->bitmap) {
for (d = 0;
d < conf->raid_disks - conf->max_degraded;
d++)
md_bitmap_startwrite(mddev->bitmap,
sh->sector,
RAID5_STRIPE_SECTORS(conf),
0);
sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
@ -5972,6 +5923,87 @@ static bool reshape_disabled(struct mddev *mddev)
return is_md_suspended(mddev) || !md_is_rdwr(mddev);
}
enum reshape_loc {
LOC_NO_RESHAPE,
LOC_AHEAD_OF_RESHAPE,
LOC_INSIDE_RESHAPE,
LOC_BEHIND_RESHAPE,
};
static enum reshape_loc get_reshape_loc(struct mddev *mddev,
struct r5conf *conf, sector_t logical_sector)
{
sector_t reshape_progress, reshape_safe;
/*
* Spinlock is needed as reshape_progress may be
* 64bit on a 32bit platform, and so it might be
* possible to see a half-updated value.
* Of course reshape_progress could change after
* the lock is dropped, so once we get a reference
* to the stripe that we think it is, we will have
* to check again.
*/
spin_lock_irq(&conf->device_lock);
reshape_progress = conf->reshape_progress;
reshape_safe = conf->reshape_safe;
spin_unlock_irq(&conf->device_lock);
if (reshape_progress == MaxSector)
return LOC_NO_RESHAPE;
if (ahead_of_reshape(mddev, logical_sector, reshape_progress))
return LOC_AHEAD_OF_RESHAPE;
if (ahead_of_reshape(mddev, logical_sector, reshape_safe))
return LOC_INSIDE_RESHAPE;
return LOC_BEHIND_RESHAPE;
}
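A minimal userspace model of this three-way classification may help; ahead_of() below mirrors the direction-aware ahead_of_reshape() helper in raid5.c, and the sample geometry is invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
#define MaxSector (~(sector_t)0)

enum reshape_loc { LOC_NO_RESHAPE, LOC_AHEAD_OF_RESHAPE,
    LOC_INSIDE_RESHAPE, LOC_BEHIND_RESHAPE };

/* Mirrors ahead_of_reshape(): the comparison flips for backwards reshape. */
static bool ahead_of(bool backwards, sector_t sector, sector_t reshape_sector)
{
    return backwards ? sector < reshape_sector : sector >= reshape_sector;
}

static enum reshape_loc classify(bool backwards, sector_t progress,
                                 sector_t safe, sector_t sector)
{
    if (progress == MaxSector)
        return LOC_NO_RESHAPE;
    if (ahead_of(backwards, sector, progress))
        return LOC_AHEAD_OF_RESHAPE;
    if (ahead_of(backwards, sector, safe))
        return LOC_INSIDE_RESHAPE;
    return LOC_BEHIND_RESHAPE;
}

int main(void)
{
    /* Forward reshape: sectors [0, 1000) converted, safe up to 800. */
    printf("%d\n", classify(false, 1000, 800, 1500)); /* ahead of reshape */
    printf("%d\n", classify(false, 1000, 800, 900));  /* inside the window */
    printf("%d\n", classify(false, 1000, 800, 100));  /* behind reshape */
    return 0;
}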
static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
unsigned long *sectors)
{
struct r5conf *conf = mddev->private;
sector_t start = *offset;
sector_t end = start + *sectors;
sector_t prev_start = start;
sector_t prev_end = end;
int sectors_per_chunk;
enum reshape_loc loc;
int dd_idx;
sectors_per_chunk = conf->chunk_sectors *
(conf->raid_disks - conf->max_degraded);
start = round_down(start, sectors_per_chunk);
end = round_up(end, sectors_per_chunk);
start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
/*
* For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
* progress, hence it's the same as LOC_BEHIND_RESHAPE.
*/
loc = get_reshape_loc(mddev, conf, prev_start);
if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
*offset = start;
*sectors = end - start;
return;
}
sectors_per_chunk = conf->prev_chunk_sectors *
(conf->previous_raid_disks - conf->max_degraded);
prev_start = round_down(prev_start, sectors_per_chunk);
prev_end = round_down(prev_end, sectors_per_chunk);
prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
/*
* For LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
* is handled in make_stripe_request(); we can't know that here, hence
* we set bits for both.
*/
*offset = min(start, prev_start);
*sectors = max(end, prev_end) - *offset;
}
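As a worked example of the widening above (a sketch with invented geometry; raid5_compute_sector() is omitted, so the sector mapping is left as identity):

#include <stdint.h>
#include <stdio.h>

#define round_down(x, y) ((x) - ((x) % (y)))
#define round_up(x, y)   round_down((x) + (y) - 1, (y))

int main(void)
{
    /* e.g. 128-sector chunks, 4 data disks (6 raid disks, max_degraded 2) */
    uint64_t sectors_per_chunk = 128 * 4;
    uint64_t start = 700, sectors = 100, end = start + sectors;

    start = round_down(start, sectors_per_chunk); /* 700 -> 512 */
    end = round_up(end, sectors_per_chunk);       /* 800 -> 1024 */

    /* The bitmap range is widened to whole data-chunk stripes. */
    printf("offset=%llu sectors=%llu\n",
           (unsigned long long)start, (unsigned long long)(end - start));
    return 0;
}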
static enum stripe_result make_stripe_request(struct mddev *mddev,
struct r5conf *conf, struct stripe_request_ctx *ctx,
sector_t logical_sector, struct bio *bi)
@ -5986,28 +6018,14 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
seq = read_seqcount_begin(&conf->gen_lock);
if (unlikely(conf->reshape_progress != MaxSector)) {
/*
* Spinlock is needed as reshape_progress may be
* 64bit on a 32bit platform, and so it might be
* possible to see a half-updated value.
* Of course reshape_progress could change after
* the lock is dropped, so once we get a reference
* to the stripe that we think it is, we will have
* to check again.
*/
spin_lock_irq(&conf->device_lock);
if (ahead_of_reshape(mddev, logical_sector,
conf->reshape_progress)) {
previous = 1;
} else {
if (ahead_of_reshape(mddev, logical_sector,
conf->reshape_safe)) {
spin_unlock_irq(&conf->device_lock);
ret = STRIPE_SCHEDULE_AND_RETRY;
goto out;
}
enum reshape_loc loc = get_reshape_loc(mddev, conf,
logical_sector);
if (loc == LOC_INSIDE_RESHAPE) {
ret = STRIPE_SCHEDULE_AND_RETRY;
goto out;
}
spin_unlock_irq(&conf->device_lock);
if (loc == LOC_AHEAD_OF_RESHAPE)
previous = 1;
}
new_sector = raid5_compute_sector(conf, logical_sector, previous,
@ -6189,8 +6207,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
if ((bi->bi_opf & REQ_NOWAIT) &&
(conf->reshape_progress != MaxSector) &&
!ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) &&
ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) {
get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) {
bio_wouldblock_error(bi);
if (rw == WRITE)
md_write_end(mddev);
@ -9090,6 +9107,7 @@ static struct md_personality raid6_personality =
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.bitmap_sector = raid5_bitmap_sector,
};
static struct md_personality raid5_personality =
{
@ -9115,6 +9133,7 @@ static struct md_personality raid5_personality =
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.bitmap_sector = raid5_bitmap_sector,
};
static struct md_personality raid4_personality =
@ -9141,6 +9160,7 @@ static struct md_personality raid4_personality =
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.bitmap_sector = raid5_bitmap_sector,
};
static int __init raid5_init(void)

View File

@ -358,7 +358,6 @@ enum {
STRIPE_REPLACED,
STRIPE_PREREAD_ACTIVE,
STRIPE_DELAYED,
STRIPE_DEGRADED,
STRIPE_BIT_DELAY,
STRIPE_EXPANDING,
STRIPE_EXPAND_SOURCE,
@ -372,9 +371,6 @@ enum {
STRIPE_ON_RELEASE_LIST,
STRIPE_BATCH_READY,
STRIPE_BATCH_ERR,
STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
* to batch yet.
*/
STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
* this bit is used in two scenarios:
*

View File

@ -311,12 +311,8 @@ static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
static u32 cxd2841er_calc_iffreq_xtal(enum cxd2841er_xtal xtal, u32 ifhz)
{
u64 tmp;
tmp = (u64) ifhz * 16777216;
do_div(tmp, ((xtal == SONY_XTAL_24000) ? 48000000 : 41000000));
return (u32) tmp;
return div_u64(ifhz * 16777216ull,
(xtal == SONY_XTAL_24000) ? 48000000 : 41000000);
}
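The rewrite works because the ull suffix promotes the multiply to 64 bits before it can overflow, exactly as the old explicit (u64) cast did. A userspace illustration (plain 64-bit division stands in for the kernel's div_u64()):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_iffreq(uint32_t ifhz, uint32_t xtal_hz)
{
    /* 16777216 = 2^24; a plain 32-bit multiply would overflow badly. */
    return (uint32_t)(ifhz * 16777216ull / xtal_hz);
}

int main(void)
{
    uint32_t ifhz = 4900000; /* 4.9 MHz IF */

    /* 4900000 * 2^24 is ~8.2e13, far beyond UINT32_MAX */
    printf("%" PRIu32 "\n", calc_iffreq(ifhz, 48000000));
    return 0;
}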
static u32 cxd2841er_calc_iffreq(u32 ifhz)

View File

@ -8,6 +8,7 @@
* Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
*/
#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
@ -146,6 +147,19 @@ static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
return ret;
}
static int ub913_update_bits(const struct ub913_data *priv, u8 reg, u8 mask,
u8 val)
{
int ret;
ret = regmap_update_bits(priv->regmap, reg, mask, val);
if (ret < 0)
dev_err(&priv->client->dev,
"Cannot update register 0x%02x %d!\n", reg, ret);
return ret;
}
/*
* GPIO chip
*/
@ -733,10 +747,13 @@ static int ub913_hw_init(struct ub913_data *priv)
if (ret)
return dev_err_probe(dev, ret, "i2c master init failed\n");
ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
v &= ~UB913_REG_GENERAL_CFG_PCLK_RISING;
v |= priv->pclk_polarity_rising ? UB913_REG_GENERAL_CFG_PCLK_RISING : 0;
ub913_write(priv, UB913_REG_GENERAL_CFG, v);
ret = ub913_update_bits(priv, UB913_REG_GENERAL_CFG,
UB913_REG_GENERAL_CFG_PCLK_RISING,
FIELD_PREP(UB913_REG_GENERAL_CFG_PCLK_RISING,
priv->pclk_polarity_rising));
if (ret)
return ret;
return 0;
}
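FIELD_PREP() shifts a value into position under a mask, so for this single-bit field it is equivalent to the old manual clear-and-set but self-documenting. A userspace rendition of the idea (the kernel macro additionally type-checks masks at compile time, and the bit position below is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Minimal field_prep(): shift the value to the mask's lowest set bit. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
    return (val << __builtin_ctz(mask)) & mask; /* GCC/Clang builtin */
}

int main(void)
{
    uint32_t PCLK_RISING = 1u << 0; /* assumed bit position */
    uint32_t reg = 0xa2;

    /* update_bits: clear the field, then OR in the prepared value */
    reg = (reg & ~PCLK_RISING) | field_prep(PCLK_RISING, 1);
    printf("0x%x\n", reg);
    return 0;
}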

View File

@ -398,8 +398,13 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
int ret;
/* Set all GPIOs to local input mode */
ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
if (ret)
return ret;
ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
if (ret)
return ret;
gc->label = dev_name(dev);
gc->parent = dev;
@ -961,10 +966,11 @@ static void ub953_calc_clkout_params(struct ub953_data *priv,
clkout_data->rate = clkout_rate;
}
static void ub953_write_clkout_regs(struct ub953_data *priv,
const struct ub953_clkout_data *clkout_data)
static int ub953_write_clkout_regs(struct ub953_data *priv,
const struct ub953_clkout_data *clkout_data)
{
u8 clkout_ctrl0, clkout_ctrl1;
int ret;
if (priv->hw_data->is_ub971)
clkout_ctrl0 = clkout_data->m;
@ -974,8 +980,15 @@ static void ub953_write_clkout_regs(struct ub953_data *priv,
clkout_ctrl1 = clkout_data->n;
ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
if (ret)
return ret;
ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
if (ret)
return ret;
return 0;
}
static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
@ -1055,9 +1068,7 @@ static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&priv->client->dev, "%s %lu (requested %lu)\n", __func__,
clkout_data.rate, rate);
ub953_write_clkout_regs(priv, &clkout_data);
return 0;
return ub953_write_clkout_regs(priv, &clkout_data);
}
static const struct clk_ops ub953_clkout_ops = {
@ -1082,7 +1093,9 @@ static int ub953_register_clkout(struct ub953_data *priv)
/* Initialize clkout to 25MHz by default */
ub953_calc_clkout_params(priv, UB953_DEFAULT_CLKOUT_RATE, &clkout_data);
ub953_write_clkout_regs(priv, &clkout_data);
ret = ub953_write_clkout_regs(priv, &clkout_data);
if (ret)
return ret;
priv->clkout_clk_hw.init = &init;
@ -1229,10 +1242,15 @@ static int ub953_hw_init(struct ub953_data *priv)
if (ret)
return dev_err_probe(dev, ret, "i2c init failed\n");
ub953_write(priv, UB953_REG_GENERAL_CFG,
(priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK) |
((priv->num_data_lanes - 1) << UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT) |
UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE);
v = 0;
v |= priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK;
v |= (priv->num_data_lanes - 1) <<
UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT;
v |= UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE;
ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v);
if (ret)
return ret;
return 0;
}

View File

@ -191,10 +191,11 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb)
mux_args.mux_buf_sz = mux_buf_sz;
dvb->streaming = true;
dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
if (!dvb->mux)
return -ENOMEM;
dvb->streaming = true;
vidtv_mux_start_thread(dvb->mux);
dev_dbg_ratelimited(dev, "Started streaming\n");
@ -205,6 +206,11 @@ static int vidtv_stop_streaming(struct vidtv_dvb *dvb)
{
struct device *dev = &dvb->pdev->dev;
if (!dvb->streaming) {
dev_warn_ratelimited(dev, "No streaming. Skipping.\n");
return 0;
}
dvb->streaming = false;
vidtv_mux_stop_thread(dvb->mux);
vidtv_mux_destroy(dvb->mux);

View File

@ -2886,6 +2886,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
/* Sonix Technology Co. Ltd. - 292A IPC AR0330 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0c45,
.idProduct = 0x6366,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
/* MT6227 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@ -2914,6 +2923,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
/* Kurokesu C1 PRO */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x16d0,
.idProduct = 0x0ed1,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
/* Syntek (HP Spartan) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,

View File

@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <media/jpeg.h>
#include <media/v4l2-common.h>
#include "uvcvideo.h"
@ -1114,6 +1115,7 @@ static void uvc_video_stats_stop(struct uvc_streaming *stream)
static int uvc_video_decode_start(struct uvc_streaming *stream,
struct uvc_buffer *buf, const u8 *data, int len)
{
u8 header_len;
u8 fid;
/*
@ -1127,6 +1129,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -EINVAL;
}
header_len = data[0];
fid = data[1] & UVC_STREAM_FID;
/*
@ -1208,9 +1211,31 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -EAGAIN;
}
/*
* Some cameras, when running two parallel streams (one MJPEG alongside
* another non-MJPEG stream), are known to lose the EOF packet for a frame.
* We can detect the end of a frame by checking for a new SOI marker, as
* the SOI always lies on the packet boundary between two frames for
* these devices.
*/
if (stream->dev->quirks & UVC_QUIRK_MJPEG_NO_EOF &&
(stream->cur_format->fcc == V4L2_PIX_FMT_MJPEG ||
stream->cur_format->fcc == V4L2_PIX_FMT_JPEG)) {
const u8 *packet = data + header_len;
if (len >= header_len + 2 &&
packet[0] == 0xff && packet[1] == JPEG_MARKER_SOI &&
buf->bytesused != 0) {
buf->state = UVC_BUF_STATE_READY;
buf->error = 1;
stream->last_fid ^= UVC_STREAM_FID;
return -EAGAIN;
}
}
stream->last_fid = fid;
return data[0];
return header_len;
}
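In other words: if a payload begins with the JPEG SOI marker (0xff 0xd8) while the frame buffer already holds data, the previous frame is taken as complete. A hedged userspace sketch of just that test, with the buffer layout simplified:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define JPEG_MARKER_SOI 0xd8 /* second byte of the ff d8 start-of-image marker */

/* True when this payload starts a new frame, i.e. the previous frame
 * ended without its EOF packet ever arriving. */
static bool frame_boundary(const uint8_t *payload, size_t len,
                           size_t bytes_buffered)
{
    return len >= 2 &&
           payload[0] == 0xff && payload[1] == JPEG_MARKER_SOI &&
           bytes_buffered != 0;
}

int main(void)
{
    const uint8_t pkt[] = { 0xff, 0xd8, 0xff, 0xe0 };

    return frame_boundary(pkt, sizeof(pkt), 4096) ? 0 : 1;
}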
static inline enum dma_data_direction uvc_stream_dir(

View File

@ -76,6 +76,7 @@
#define UVC_QUIRK_NO_RESET_RESUME 0x00004000
#define UVC_QUIRK_DISABLE_AUTOSUSPEND 0x00008000
#define UVC_QUIRK_INVALID_DEVICE_SOF 0x00010000
#define UVC_QUIRK_MJPEG_NO_EOF 0x00020000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001

View File

@ -260,6 +260,7 @@
#define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */
#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
#define PAD_DS_TUNE_DLY2_SEL BIT(1) /* RW */
#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
#define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */
#define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */
@ -305,6 +306,7 @@
/* EMMC50_PAD_DS_TUNE mask */
#define PAD_DS_DLY_SEL BIT(16) /* RW */
#define PAD_DS_DLY2_SEL BIT(15) /* RW */
#define PAD_DS_DLY1 GENMASK(14, 10) /* RW */
#define PAD_DS_DLY3 GENMASK(4, 0) /* RW */
@ -2309,13 +2311,23 @@ tune_done:
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
host->hs400_mode = true;
if (host->top_base)
writel(host->hs400_ds_delay,
host->top_base + EMMC50_PAD_DS_TUNE);
else
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
if (host->top_base) {
if (host->hs400_ds_dly3)
sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY3, host->hs400_ds_dly3);
if (host->hs400_ds_delay)
writel(host->hs400_ds_delay,
host->top_base + EMMC50_PAD_DS_TUNE);
} else {
if (host->hs400_ds_dly3)
sdr_set_field(host->base + PAD_DS_TUNE,
PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
if (host->hs400_ds_delay)
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
}
/* hs400 mode must set it to 0 */
sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
/* to improve read performance, set outstanding to 2 */
@ -2335,14 +2347,11 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
if (host->top_base) {
sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY_SEL);
if (host->hs400_ds_dly3)
sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY3, host->hs400_ds_dly3);
sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY2_SEL);
} else {
sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
if (host->hs400_ds_dly3)
sdr_set_field(host->base + PAD_DS_TUNE,
PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
}
host->hs400_tuning = true;

View File

@ -394,15 +394,16 @@ static int c_can_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
goto exit_free_device;
goto exit_pm_runtime;
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
KBUILD_MODNAME, priv->base, dev->irq);
return 0;
exit_free_device:
exit_pm_runtime:
pm_runtime_disable(priv->device);
exit_free_device:
free_c_can_dev(dev);
exit:
dev_err(&pdev->dev, "probe failed\n");

View File

@ -867,10 +867,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
}
break;
case CAN_STATE_ERROR_ACTIVE:
cf->can_id |= CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
if (skb) {
cf->can_id |= CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
}
break;
default:
netdev_warn(ndev, "unhandled error state (%d:%s)!\n",

View File

@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
return ret;
}
return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
if (es58x_dev->udev->serial)
ret = devlink_info_serial_number_put(req,
es58x_dev->udev->serial);
return ret;
}
const struct devlink_ops es58x_dl_ops = {

View File

@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return -ENOMEM;
}
buffer->type = IGC_TX_BUFFER_TYPE_SKB;
buffer->skb = skb;
buffer->protocol = 0;
buffer->bytecount = skb->len;

View File

@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
if (err)
return;
for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
if (!hw_stats[i].cells_bytes)

View File

@ -39,10 +39,14 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
if (!sap->used)
continue;
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
if (sap->xs->props.family == AF_INET6)
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] %cx ipaddr=%pI6c\n",
i, (sap->rx ? 'r' : 't'), &sap->ipaddr);
else
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] %cx ipaddr=%pI4\n",
i, (sap->rx ? 'r' : 't'), &sap->ipaddr[3]);
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
i, be32_to_cpu(sap->xs->id.spi),
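%pI6c and %pI4 are kernel printk extensions; a userspace analogue of the new output uses inet_ntop() on the appropriate slice of the stored address (the IPv4-in-last-word layout below assumes the driver's sap->ipaddr convention):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* IPv4 kept in the last of four words, as nsim stores it */
    uint32_t ipaddr[4] = { 0, 0, 0, htonl(0xc0a80001) }; /* 192.168.0.1 */
    char buf[INET6_ADDRSTRLEN];

    /* AF_INET6 would format all 16 bytes; AF_INET only the last word */
    inet_ntop(AF_INET, &ipaddr[3], buf, sizeof(buf));
    printf("sa[0] rx ipaddr=%s\n", buf);
    return 0;
}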

View File

@ -2657,7 +2657,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
ctx.data.u32_val = nla_get_u32(attr_data);
break;
case TEAM_OPTION_TYPE_STRING:
if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
if (nla_len(attr_data) > TEAM_STRING_MAX_LEN ||
!memchr(nla_data(attr_data), '\0',
nla_len(attr_data))) {
err = -EINVAL;
goto team_put;
}
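The added memchr() call rejects option strings that are not NUL-terminated within the attribute payload. A minimal userspace model of the same validation, with the netlink accessors replaced by plain parameters:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define TEAM_STRING_MAX_LEN 32

/* Valid only if the payload fits and contains a terminating NUL. */
static bool team_string_option_ok(const void *data, size_t len)
{
    return len <= TEAM_STRING_MAX_LEN && memchr(data, '\0', len) != NULL;
}

int main(void)
{
    return team_string_option_ok("eth0", 5) ? 0 : 1; /* 5 bytes incl. NUL */
}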

View File

@ -2966,8 +2966,11 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_init(vxlan);
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
err = vxlan_vnigroup_init(vxlan);
if (err)
return err;
}
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats) {

View File

@ -4418,6 +4418,22 @@ static struct ath12k_reg_rule
return reg_rule_ptr;
}
static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
u32 num_reg_rules)
{
u8 num_invalid_5ghz_rules = 0;
u32 count, start_freq;
for (count = 0; count < num_reg_rules; count++) {
start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
if (start_freq >= ATH12K_MIN_6G_FREQ)
num_invalid_5ghz_rules++;
}
return num_invalid_5ghz_rules;
}
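A userspace sketch of the counting helper, with plain start frequencies in place of the le32_get_bits() field extraction (the ATH12K_MIN_6G_FREQ value here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define ATH12K_MIN_6G_FREQ 5925 /* MHz; assumed boundary value */

static unsigned int count_invalid_5ghz_rules(const uint32_t *start_freq,
                                             uint32_t n)
{
    unsigned int invalid = 0;

    /* Any "5 GHz" rule that actually starts in the 6 GHz band is bogus. */
    for (uint32_t i = 0; i < n; i++)
        if (start_freq[i] >= ATH12K_MIN_6G_FREQ)
            invalid++;
    return invalid;
}

int main(void)
{
    uint32_t rules[] = { 5170, 5490, 5925, 6425 };

    printf("%u\n", count_invalid_5ghz_rules(rules, 4)); /* prints 2 */
    return 0;
}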
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
struct sk_buff *skb,
struct ath12k_reg_info *reg_info)
@ -4428,6 +4444,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
u32 num_2g_reg_rules, num_5g_reg_rules;
u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
u8 num_invalid_5ghz_ext_rules;
u32 total_reg_rules = 0;
int ret, i, j;
@ -4521,20 +4538,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
/* FIXME: Currently FW includes 6G reg rule also in 5G rule
* list for country US.
* Having same 6G reg rule in 5G and 6G rules list causes
* intersect check to be true, and same rules will be shown
* multiple times in iw cmd. So added hack below to avoid
* parsing 6G rule from 5G reg rule list, and this can be
* removed later, after FW updates to remove 6G reg rule
* from 5G rules list.
*/
if (memcmp(reg_info->alpha2, "US", 2) == 0) {
reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
num_5g_reg_rules = reg_info->num_5g_reg_rules;
}
reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
reg_info->num_phy = le32_to_cpu(ev->num_phy);
@ -4636,8 +4639,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
ext_wmi_reg_rule += num_2g_reg_rules;
/* Firmware might include a 6 GHz reg rule in the 5 GHz rule list
* for a few countries, along with a separate 6 GHz rule.
* Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
* lists causes the intersect check to be true, and the same rules
* will be shown multiple times in the iw cmd.
* Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
*/
num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
num_5g_reg_rules);
if (num_invalid_5ghz_ext_rules) {
ath12k_dbg(ab, ATH12K_DBG_WMI,
"CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
reg_info->alpha2, reg_info->num_5g_reg_rules,
num_invalid_5ghz_ext_rules);
num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
reg_info->num_5g_reg_rules = num_5g_reg_rules;
}
if (num_5g_reg_rules) {
ext_wmi_reg_rule += num_2g_reg_rules;
reg_info->reg_rules_5g_ptr =
create_ext_reg_rules_from_wmi(num_5g_reg_rules,
ext_wmi_reg_rule);
@ -4649,7 +4673,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
ext_wmi_reg_rule += num_5g_reg_rules;
/* We have adjusted the number of 5 GHz reg rules above, but the
* pointer into ext_wmi_reg_rule still needs to skip that many rules.
*
* NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
*/
ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
reg_info->reg_rules_6g_ap_ptr[i] =

View File

@ -3891,7 +3891,6 @@ struct ath12k_wmi_eht_rate_set_params {
#define MAX_REG_RULES 10
#define REG_ALPHA2_LEN 2
#define MAX_6G_REG_RULES 5
#define REG_US_5G_NUM_REG_RULES 4
enum wmi_start_event_param {
WMI_VDEV_START_RESP_EVENT = 0,

View File

@ -6032,6 +6032,17 @@ SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */
SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */
SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */
#define SWITCHTEC_PCI100X_QUIRK(vid) \
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
SWITCHTEC_PCI100X_QUIRK(0x1001); /* PCI1001XG4 */
SWITCHTEC_PCI100X_QUIRK(0x1002); /* PCI1002XG4 */
SWITCHTEC_PCI100X_QUIRK(0x1003); /* PCI1003XG4 */
SWITCHTEC_PCI100X_QUIRK(0x1004); /* PCI1004XG4 */
SWITCHTEC_PCI100X_QUIRK(0x1005); /* PCI1005XG4 */
SWITCHTEC_PCI100X_QUIRK(0x1006); /* PCI1006XG4 */
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
* These IDs are used to forward responses to the originator on the other
@ -6301,6 +6312,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif

View File

@ -1737,6 +1737,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
.driver_data = gen, \
}
#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
{ \
.vendor = PCI_VENDOR_ID_EFAR, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.class = (PCI_CLASS_MEMORY_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
.driver_data = gen, \
}, \
{ \
.vendor = PCI_VENDOR_ID_EFAR, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.class = (PCI_CLASS_BRIDGE_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
.driver_data = gen, \
}
static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
@ -1831,6 +1851,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4), /* PCI1001 16XG4 */
SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4), /* PCI1002 12XG4 */
SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4), /* PCI1003 16XG4 */
SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4), /* PCI1004 16XG4 */
SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4), /* PCI1005 16XG4 */
SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4), /* PCI1006 16XG4 */
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

View File

@ -1281,7 +1281,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
ret = devm_request_threaded_irq(chip->dev, irq,
NULL, cy8c95x0_irq_handler,
IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
if (ret) {
dev_err(chip->dev, "failed to request irq %d\n", irq);

View File

@ -646,15 +646,20 @@ static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
};
static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
{ .start = 0x01c, .end = 0x0c8 },
{ .start = 0x12c, .end = 0x184 },
{ .start = 0x01c, .end = 0x064 },
{ .start = 0x084, .end = 0x0a0 },
{ .start = 0x0a4, .end = 0x0c8 },
{ .start = 0x12c, .end = 0x164 },
{ .start = 0x16c, .end = 0x184 },
{ .start = 0x190, .end = 0x198 },
{ .start = 0x1a0, .end = 0x204 },
{ .start = 0x21c, .end = 0x250 },
{ .start = 0x25c, .end = 0x2f0 },
{ .start = 0x21c, .end = 0x2f0 },
{ .start = 0x310, .end = 0x3d8 },
{ .start = 0x400, .end = 0x4f0 },
{ .start = 0x4f8, .end = 0x7e8 },
{ .start = 0x400, .end = 0x420 },
{ .start = 0x444, .end = 0x490 },
{ .start = 0x4bc, .end = 0x4f0 },
{ .start = 0x4f8, .end = 0x54c },
{ .start = 0x57c, .end = 0x7e8 },
{ .start = 0x8d0, .end = 0x8d8 },
{ .start = 0xacc, .end = 0xf00 }
};

View File

@ -116,6 +116,9 @@ struct f_ospi {
static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
{
if (!op->dummy.nbytes)
return 0;
return (op->dummy.nbytes * 8) / op->dummy.buswidth;
}
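The added guard returns early when there is no dummy phase, which also avoids dividing by a zero buswidth. The conversion itself is bytes to bits to bus cycles, as in this userspace restatement:

#include <stdint.h>
#include <stdio.h>

static uint32_t dummy_cycles(uint32_t nbytes, uint32_t buswidth)
{
    if (!nbytes)
        return 0; /* no dummy phase, and no divide-by-zero on buswidth */
    return nbytes * 8 / buswidth;
}

int main(void)
{
    /* 2 dummy bytes on a quad (4-bit) bus: 16 bits / 4 lines = 4 cycles */
    printf("%u\n", dummy_cycles(2, 4));
    return 0;
}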

View File

@ -350,6 +350,7 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
#ifdef CONFIG_SERIAL_8250_DMA
extern int serial8250_tx_dma(struct uart_8250_port *);
extern void serial8250_tx_dma_flush(struct uart_8250_port *);
extern int serial8250_rx_dma(struct uart_8250_port *);
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
@ -382,6 +383,7 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
return -1;
}
static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { }
static inline int serial8250_rx_dma(struct uart_8250_port *p)
{
return -1;

View File

@ -139,6 +139,22 @@ err:
return ret;
}
void serial8250_tx_dma_flush(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
if (!dma->tx_running)
return;
/*
* kfifo_reset() has been called by the serial core, avoid
* advancing and underflowing in __dma_tx_complete().
*/
dma->tx_size = 0;
dmaengine_terminate_async(dma->txchan);
}
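The interplay the comment describes can be modelled in a few lines of userspace C: the flush zeroes tx_size so that an in-flight completion cannot advance, and thereby underflow, the already-reset FIFO. All names below are stand-ins:

#include <stddef.h>
#include <stdio.h>

static size_t fifo_len; /* bytes queued (kfifo stand-in) */
static size_t tx_size;  /* bytes handed to the DMA engine */

static void dma_tx_complete(void)
{
    /* __dma_tx_complete() advances the FIFO by tx_size; if the FIFO
     * was reset meanwhile, tx_size must be 0 or this would underflow. */
    fifo_len -= tx_size;
    tx_size = 0;
}

static void tx_dma_flush(void)
{
    fifo_len = 0; /* kfifo_reset() already done by the serial core */
    tx_size = 0;  /* keep the in-flight completion from advancing */
}

int main(void)
{
    fifo_len = 100;
    tx_size = 64;      /* DMA in flight */
    tx_dma_flush();
    dma_tx_complete(); /* no underflow: 0 - 0 */
    printf("%zu\n", fifo_len);
    return 0;
}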
int serial8250_rx_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;

View File

@ -2557,6 +2557,14 @@ static unsigned int npcm_get_divisor(struct uart_8250_port *up,
return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
}
static void serial8250_flush_buffer(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
if (up->dma)
serial8250_tx_dma_flush(up);
}
static unsigned int serial8250_do_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
@ -3260,6 +3268,7 @@ static const struct uart_ops serial8250_pops = {
.break_ctl = serial8250_break_ctl,
.startup = serial8250_startup,
.shutdown = serial8250_shutdown,
.flush_buffer = serial8250_flush_buffer,
.set_termios = serial8250_set_termios,
.set_ldisc = serial8250_set_ldisc,
.pm = serial8250_pm,

View File

@ -172,6 +172,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
* The caller is responsible for initializing the following fields of @port
* ->dev (must be valid)
* ->flags
* ->iobase
* ->mapbase
* ->mapsize
* ->regshift (if @use_defaults is false)
@ -213,7 +214,7 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
/* Read the registers I/O access type (default: MMIO 8-bit) */
ret = device_property_read_u32(dev, "reg-io-width", &value);
if (ret) {
port->iotype = UPIO_MEM;
port->iotype = port->iobase ? UPIO_PORT : UPIO_MEM;
} else {
switch (value) {
case 1:
@ -226,11 +227,11 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
break;
default:
port->iotype = UPIO_UNKNOWN;
if (!use_defaults) {
dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
return -EINVAL;
}
port->iotype = UPIO_UNKNOWN;
break;
}
}
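A userspace restatement of the iotype selection, including the new iobase fallback (the UPIO_* values are stand-ins for the serial-core constants):

#include <stdio.h>

enum { UPIO_UNKNOWN, UPIO_PORT, UPIO_MEM, UPIO_MEM16,
       UPIO_MEM32, UPIO_MEM32BE };

static int pick_iotype(int have_width, unsigned int width,
                       unsigned long iobase, int big_endian)
{
    if (!have_width)
        return iobase ? UPIO_PORT : UPIO_MEM; /* new: honour iobase */
    switch (width) {
    case 1: return UPIO_MEM;
    case 2: return UPIO_MEM16;
    case 4: return big_endian ? UPIO_MEM32BE : UPIO_MEM32;
    default: return UPIO_UNKNOWN;
    }
}

int main(void)
{
    /* No reg-io-width property, legacy I/O port present */
    printf("%d\n", pick_iotype(0, 0, 0x3f8, 0)); /* UPIO_PORT */
    return 0;
}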

View File

@ -216,6 +216,7 @@ void ufs_bsg_remove(struct ufs_hba *hba)
return;
bsg_remove_queue(hba->bsg_queue);
hba->bsg_queue = NULL;
device_del(bsg_dev);
put_device(bsg_dev);

View File

@ -371,7 +371,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
static void acm_ctrl_irq(struct urb *urb)
{
struct acm *acm = urb->context;
struct usb_cdc_notification *dr = urb->transfer_buffer;
struct usb_cdc_notification *dr;
unsigned int current_size = urb->actual_length;
unsigned int expected_size, copy_size, alloc_size;
int retval;
@ -398,14 +398,25 @@ static void acm_ctrl_irq(struct urb *urb)
usb_mark_last_busy(acm->dev);
if (acm->nb_index)
if (acm->nb_index == 0) {
/*
* The first chunk of a message must contain at least the
* notification header with the length field, otherwise we
* can't get an expected_size.
*/
if (current_size < sizeof(struct usb_cdc_notification)) {
dev_dbg(&acm->control->dev, "urb too short\n");
goto exit;
}
dr = urb->transfer_buffer;
} else {
dr = (struct usb_cdc_notification *)acm->notification_buffer;
}
/* size = notification-header + (optional) data */
expected_size = sizeof(struct usb_cdc_notification) +
le16_to_cpu(dr->wLength);
if (current_size < expected_size) {
if (acm->nb_index != 0 || current_size < expected_size) {
/* notification is transmitted fragmented, reassemble */
if (acm->nb_size < expected_size) {
u8 *new_buffer;
@ -1727,13 +1738,16 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
{ USB_DEVICE(0x045b, 0x023c), /* Renesas R-Car H3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
{ USB_DEVICE(0x045b, 0x0247), /* Renesas R-Car D3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
{ USB_DEVICE(0x045b, 0x0248), /* Renesas R-Car M3-N USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x045b, 0x024D), /* Renesas R-Car E3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */

View File

@ -1818,6 +1818,17 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
/*
* The USB 2.0 spec prohibits hubs from having more than one
* configuration or interface, and we rely on this prohibition.
* Refuse to accept a device that violates it.
*/
if (hdev->descriptor.bNumConfigurations > 1 ||
hdev->actconfig->desc.bNumInterfaces > 1) {
dev_err(&intf->dev, "Invalid hub with more than one config or interface\n");
return -EINVAL;
}
/*
* Set default autosuspend delay as 0 to speedup bus suspend,
* based on the below considerations:
@ -4666,7 +4677,6 @@ void usb_ep0_reinit(struct usb_device *udev)
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
@ -4772,7 +4782,7 @@ static int get_bMaxPacketSize0(struct usb_device *udev,
for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
/* Start with invalid values in case the transfer fails */
buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
USB_DT_DEVICE << 8, 0,
buf, size,

View File

@ -432,6 +432,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
/* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */
{ USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM },
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@ -522,6 +525,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
/* Teclast disk */
{ USB_DEVICE(0x1f75, 0x0917), .driver_info = USB_QUIRK_NO_LPM },
/* Hauppauge HVR-950q */
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },

View File

@ -4612,6 +4612,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->driver = NULL;
hsotg->gadget.dev.of_node = NULL;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
hsotg->enabled = 0;

View File

@ -2618,10 +2618,38 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
u32 reg;
u32 timeout = 2000;
u32 saved_config = 0;
if (pm_runtime_suspended(dwc->dev))
return 0;
/*
* When operating in USB 2.0 speeds (HS/FS), ensure that
* GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY are cleared before starting
* or stopping the controller. This resolves timeout issues that occur
* during frequent role switches between host and device modes.
*
* Save and clear these settings, then restore them after completing the
* controller start or stop sequence.
*
* This solution was discovered through experimentation as it is not
* mentioned in the dwc3 programming guide. It has been tested on
* Exynos platforms.
*/
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
}
if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
}
if (saved_config)
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
@ -2649,6 +2677,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
reg &= DWC3_DSTS_DEVCTRLHLT;
} while (--timeout && !(!is_on ^ !reg));
if (saved_config) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= saved_config;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
if (!timeout)
return -ETIMEDOUT;
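The save/clear/restore pattern reduces to a read-modify-write with the previously set bits remembered; a compact userspace model (the masks and register value are illustrative, not the dwc3 layout):

#include <stdint.h>
#include <stdio.h>

#define SUSPHY   (1u << 6)
#define ENBLSLPM (1u << 8)

int main(void)
{
    uint32_t phycfg = SUSPHY | ENBLSLPM | 0x5; /* pretend register */
    uint32_t saved = phycfg & (SUSPHY | ENBLSLPM);

    phycfg &= ~(SUSPHY | ENBLSLPM); /* clear for the run/stop sequence */
    /* ... issue run/stop and poll for completion here ... */
    phycfg |= saved;                /* restore only what was set before */

    printf("0x%x\n", phycfg);
    return 0;
}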

View File

@ -906,6 +906,15 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
status = -ENODEV;
/*
* Reset wMaxPacketSize to the maximum packet size of a FS bulk transfer
* before claiming the endpoints. This ensures that wMaxPacketSize does
* not exceed the limit on bind retries, where the dwc3 TX/RX FIFOs may
* already be configured with a 512-byte maxpacket for IN/OUT endpoints
* that support HS speed only.
*/
bulk_in_desc.wMaxPacketSize = cpu_to_le16(64);
bulk_out_desc.wMaxPacketSize = cpu_to_le16(64);
/* allocate instance-specific endpoints */
midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
if (!midi->in_ep)
@ -999,11 +1008,11 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
}
/* configure the endpoint descriptors ... */
ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
ms_out_desc.bNumEmbMIDIJack = midi->out_ports;
ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
ms_in_desc.bNumEmbMIDIJack = midi->in_ports;
/* ... and add them to the list */
endpoint_descriptor_index = i;

View File

@ -310,7 +310,7 @@ struct renesas_usb3_request {
struct list_head queue;
};
#define USB3_EP_NAME_SIZE 8
#define USB3_EP_NAME_SIZE 16
struct renesas_usb3_ep {
struct usb_ep ep;
struct renesas_usb3 *usb3;

View File

@ -946,6 +946,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
* booting from USB disk or using a usb keyboard
*/
hcc_params = readl(base + EHCI_HCC_PARAMS);
/* The LS7A EHCI controller doesn't have extended capabilities, so the
* EECP (EHCI Extended Capabilities Pointer) field of HCCPARAMS
* register should be 0x0 but it reads as 0xa0. So clear it to
* avoid error messages on boot.
*/
if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14)
hcc_params &= ~(0xffL << 8);
offset = (hcc_params >> 8) & 0xff;
while (offset && --count) {
pci_read_config_dword(pdev, offset, &cap);

Some files were not shown because too many files have changed in this diff.