Merge linux 6.6.46

Conflicts:
	drivers/platform/x86/intel/ifs/core.c
	drivers/platform/x86/intel/ifs/ifs.h
	kernel/sched/core.c

commit 0a76ebf09a
@@ -741,7 +741,7 @@ SecurityFlags Flags which control security negotiation and
 	may use NTLMSSP			0x00080
 	must use NTLMSSP		0x80080
 	seal (packet encryption)	0x00040
-	must seal (not implemented yet)	0x40040
+	must seal			0x40040

 cifsFYI	If set to non-zero value, additional debug information
 	will be logged to the system error log. This field
@@ -664,12 +664,6 @@
 	loops can be debugged more effectively on production
 	systems.

-	clocksource.max_cswd_read_retries= [KNL]
-		Number of clocksource_watchdog() retries due to
-		external delays before the clock will be marked
-		unstable. Defaults to two retries, that is,
-		three attempts to read the clock under test.
-
 	clocksource.verify_n_cpus= [KNL]
 		Limit the number of CPUs checked for clocksources
 		marked with CLOCK_SOURCE_VERIFY_PERCPU that
@@ -4671,11 +4665,9 @@
 	profile=	[KNL] Enable kernel profiling via /proc/profile
 			Format: [<profiletype>,]<number>
-			Param: <profiletype>: "schedule", "sleep", or "kvm"
+			Param: <profiletype>: "schedule" or "kvm"
				[defaults to kernel profiling]
			Param: "schedule" - profile schedule points.
-			Param: "sleep" - profile D-state sleeping (millisecs).
-				Requires CONFIG_SCHEDSTATS
			Param: "kvm" - profile VM exits.
			Param: <number> - step/bucket size as a power of 2 for
				statistical time based profiling.
@@ -121,32 +121,68 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #3324338 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #3324338 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X3 | #3324335 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X4 | #3194386 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X925 | #3324334 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1349291 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-500 | #841119,826419 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-600 | #1076982,1209401| N/A |
+----------------+-----------------+-----------------+-----------------------------+
@@ -15,11 +15,11 @@ Supported devices:

 Corsair HX850i

-Corsair HX1000i (Series 2022 and 2023)
+Corsair HX1000i (Legacy and Series 2023)

-Corsair HX1200i
+Corsair HX1200i (Legacy and Series 2023)

-Corsair HX1500i (Series 2022 and 2023)
+Corsair HX1500i (Legacy and Series 2023)

 Corsair RM550i
Makefile

@@ -8,7 +8,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 45
+SUBLEVEL = 46
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth
@@ -1091,6 +1091,44 @@ config ARM64_ERRATUM_3117295

	  If unsure, say Y.

+config ARM64_ERRATUM_3194386
+	bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
+	default y
+	help
+	  This option adds the workaround for the following errata:
+
+	  * ARM Cortex-A76 erratum 3324349
+	  * ARM Cortex-A77 erratum 3324348
+	  * ARM Cortex-A78 erratum 3324344
+	  * ARM Cortex-A78C erratum 3324346
+	  * ARM Cortex-A78C erratum 3324347
+	  * ARM Cortex-A710 erratum 3324338
+	  * ARM Cortex-A720 erratum 3456091
+	  * ARM Cortex-A725 erratum 3456106
+	  * ARM Cortex-X1 erratum 3324344
+	  * ARM Cortex-X1C erratum 3324346
+	  * ARM Cortex-X2 erratum 3324338
+	  * ARM Cortex-X3 erratum 3324335
+	  * ARM Cortex-X4 erratum 3194386
+	  * ARM Cortex-X925 erratum 3324334
+	  * ARM Neoverse-N1 erratum 3324349
+	  * ARM Neoverse N2 erratum 3324339
+	  * ARM Neoverse-V1 erratum 3324341
+	  * ARM Neoverse V2 erratum 3324336
+	  * ARM Neoverse-V3 erratum 3312417
+
+	  On affected cores "MSR SSBS, #0" instructions may not affect
+	  subsequent speculative instructions, which may permit unexpected
+	  speculative store bypassing.
+
+	  Work around this problem by placing a Speculation Barrier (SB) or
+	  Instruction Synchronization Barrier (ISB) after kernel changes to
+	  SSBS. The presence of the SSBS special-purpose register is hidden
+	  from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
+	  will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
	bool "Cavium erratum 22375, 24313"
	default y
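For context, a minimal userspace sketch (illustrative, not part of this merge) of the PR_SPEC_STORE_BYPASS prctl interface the help text above points to; on cores hit by erratum 3194386 this is how userspace is expected to control the SSB mitigation, since SSBS is hidden from hwcaps:

	/* Query and set per-task speculative-store-bypass control. */
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		/* Read the current speculation-control word for this task. */
		int state = prctl(PR_GET_SPECULATION_CTRL,
				  PR_SPEC_STORE_BYPASS, 0, 0, 0);
		if (state < 0) {
			perror("PR_GET_SPECULATION_CTRL");
			return 1;
		}
		printf("SSB control word: %#x\n", state);

		/* Ask the kernel to disable store bypassing for this task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");
		return 0;
	}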
@@ -40,6 +40,10 @@
  */
 #define dgh()	asm volatile("hint #6" : : : "memory")

+#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",	\
+						 SB_BARRIER_INSN"nop\n",	\
+						 ARM64_HAS_SB))
+
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 #define pmr_sync()	\
	do {	\
@@ -88,6 +88,14 @@
 #define ARM_CPU_PART_CORTEX_X2		0xD48
 #define ARM_CPU_PART_NEOVERSE_N2	0xD49
 #define ARM_CPU_PART_CORTEX_A78C	0xD4B
+#define ARM_CPU_PART_CORTEX_X1C		0xD4C
+#define ARM_CPU_PART_CORTEX_X3		0xD4E
+#define ARM_CPU_PART_NEOVERSE_V2	0xD4F
+#define ARM_CPU_PART_CORTEX_A720	0xD81
+#define ARM_CPU_PART_CORTEX_X4		0xD82
+#define ARM_CPU_PART_NEOVERSE_V3	0xD84
+#define ARM_CPU_PART_CORTEX_X925	0xD85
+#define ARM_CPU_PART_CORTEX_A725	0xD87

 #define APM_CPU_PART_XGENE		0x000
 #define APM_CPU_VAR_POTENZA		0x00

@@ -173,6 +181,14 @@
 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
+#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
+#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
+#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
+#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
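As a side note, a small standalone sketch (not from the diff) of how a MIDR value is composed from the fields the MIDR_CPU_MODEL()-style macros above use; the bit layout follows the architected MIDR_EL1 format (implementer [31:24], variant [23:20], architecture [19:16], part number [15:4], revision [3:0]):

	#include <stdint.h>
	#include <stdio.h>

	#define MIDR_IMPLEMENTOR_SHIFT	24
	#define MIDR_PARTNUM_SHIFT	4

	/* Combine implementer and part number, leaving variant/revision zero. */
	static uint32_t midr_cpu_model(uint32_t imp, uint32_t partnum)
	{
		return (imp << MIDR_IMPLEMENTOR_SHIFT) |
		       (partnum << MIDR_PARTNUM_SHIFT);
	}

	int main(void)
	{
		/* 0x41 is the 'A' (ARM Ltd.) implementer code; 0xD82 is Cortex-X4. */
		printf("MIDR model for Cortex-X4: %#x\n",
		       midr_cpu_model(0x41, 0xD82));
		return 0;
	}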
@@ -448,6 +448,30 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
 };
 #endif

+#ifdef CONFIG_ARM64_ERRATUM_3194386
+static const struct midr_range erratum_spec_ssbs_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
+	{}
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{

@@ -746,6 +770,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.cpu_enable = cpu_clear_bf16_from_user_emulation,
	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_3194386
+	{
+		.desc = "SSBS not fully self-synchronizing",
+		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
+		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
+	},
+#endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
@@ -2190,6 +2190,17 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_MTE */

+static void user_feature_fixup(void)
+{
+	if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) {
+		struct arm64_ftr_reg *regp;
+
+		regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1);
+		if (regp)
+			regp->user_mask &= ~ID_AA64PFR1_EL1_SSBS_MASK;
+	}
+}
+
 static void elf_hwcap_fixup(void)
 {
 #ifdef CONFIG_ARM64_ERRATUM_1742098

@@ -3345,6 +3356,7 @@ void __init setup_cpu_features(void)
	u32 cwg;

	setup_system_capabilities();
+	user_feature_fixup();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0()) {
@@ -558,6 +558,18 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
+
+	/*
+	 * SSBS is self-synchronizing and is intended to affect subsequent
+	 * speculative instructions, but some CPUs can speculate with a stale
+	 * value of SSBS.
+	 *
+	 * Mitigate this with an unconditional speculation barrier, as CPUs
+	 * could mis-speculate branches and bypass a conditional barrier.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
+		spec_bar();

	return SPECTRE_MITIGATED;
 }
@@ -99,4 +99,5 @@ WORKAROUND_NVIDIA_CARMEL_CNP
 WORKAROUND_QCOM_FALKOR_E1003
 WORKAROUND_REPEAT_TLBI
 WORKAROUND_SPECULATIVE_AT
+WORKAROUND_SPECULATIVE_SSBS
 WORKAROUND_SPECULATIVE_UNPRIV_LOAD
@@ -254,6 +254,12 @@ void __init efi_runtime_init(void)
	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 }

+bool efi_poweroff_required(void)
+{
+	return efi_enabled(EFI_RUNTIME_SERVICES) &&
+	       (acpi_gbl_reduced_hardware || acpi_no_s5);
+}
+
 unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;

 static void __init init_screen_info(void)
@@ -18,6 +18,7 @@ config PARISC
	select ARCH_SUPPORTS_HUGETLBFS if PA20
	select ARCH_SUPPORTS_MEMORY_FAILURE
	select ARCH_STACKWALK
+	select ARCH_HAS_CACHE_LINE_SIZE
	select ARCH_HAS_DEBUG_VM_PGTABLE
	select HAVE_RELIABLE_STACKTRACE
	select DMA_OPS
@@ -20,7 +20,16 @@

 #define SMP_CACHE_BYTES L1_CACHE_BYTES

-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#ifdef CONFIG_PA20
+#define ARCH_DMA_MINALIGN	128
+#else
+#define ARCH_DMA_MINALIGN	32
+#endif
+#define ARCH_KMALLOC_MINALIGN	16	/* ldcw requires 16-byte alignment */
+
+#define arch_slab_minalign()	((unsigned)dcache_stride)
+#define cache_line_size()	dcache_stride
+#define dma_get_cache_alignment cache_line_size

 #define __read_mostly __section(".data..read_mostly")
@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
		jit_data->header =
			bpf_jit_binary_alloc(prog_size + extable_size,
					     &jit_data->image,
-					     sizeof(u32),
+					     sizeof(long),
					     bpf_fill_ill_insns);
		if (!jit_data->header) {
			prog = orig_prog;
@@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(long cpu)

 #ifdef CONFIG_PARAVIRT
 /*
- * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
  *
- * Native (and PV wanting native due to vCPU pinning) should disable this key.
- * It is done in this backwards fashion to only have a single direction change,
- * which removes ordering between native_pv_spin_init() and HV setup.
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
  */
-DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);

 /*
  * Shortcut for the queued_spin_lock_slowpath() function that allows
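To illustrate the key-polarity change described in the new comment, here is a generic kernel-style sketch (the "example" names are hypothetical, not from this diff): the key starts disabled, so bare metal never has to flip it, and only the guest path pays the one-way transition:

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(example_virt_key);	/* hypothetical key */

	void example_lock_init(bool running_as_guest)
	{
		/* Only guests enable the key; native boot leaves it untouched. */
		if (running_as_guest)
			static_branch_enable(&example_virt_key);
	}

	bool example_take_hijack_path(void)
	{
		/* Compiles to a patched branch: no load, no compare. */
		return static_branch_likely(&example_virt_key);
	}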
@@ -609,7 +609,7 @@ void mtrr_save_state(void)
 {
	int first_cpu;

-	if (!mtrr_enabled())
+	if (!mtrr_enabled() || !mtrr_state.have_fixed)
		return;

	first_cpu = cpumask_first(cpu_online_mask);
@@ -71,13 +71,12 @@ DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text);
 DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
 #endif

-DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

 void __init native_pv_lock_init(void)
 {
-	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
-	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
-		static_branch_disable(&virt_spin_lock_key);
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&virt_spin_lock_key);
 }

 static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
@@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
			 */
			*target_pmd = *pmd;

-			addr += PMD_SIZE;
+			addr = round_up(addr + 1, PMD_SIZE);

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
-				addr += PAGE_SIZE;
+				addr = round_up(addr + 1, PAGE_SIZE);
				continue;
			}

@@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
			/* Clone the PTE */
			*target_pte = *pte;

-			addr += PAGE_SIZE;
+			addr = round_up(addr + 1, PAGE_SIZE);

		} else {
			BUG();

@@ -496,7 +496,7 @@ static void pti_clone_entry_text(void)
 {
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
-			  PTI_CLONE_PMD);
+			  PTI_LEVEL_KERNEL_IMAGE);
 }

 /*
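A quick standalone sketch (illustrative numbers, not from the diff) of why "addr = round_up(addr + 1, PAGE_SIZE)" differs from "addr += PAGE_SIZE" when the starting address is not page-aligned, which is what the pti change above corrects:

	#include <stdio.h>

	#define PAGE_SIZE 0x1000UL

	/* Same semantics as the kernel's round_up() for power-of-two alignment. */
	static unsigned long round_up_pow2(unsigned long x, unsigned long a)
	{
		return (x + a - 1) & ~(a - 1);
	}

	int main(void)
	{
		unsigned long addr = 0x1200;	/* unaligned start */

		printf("addr += PAGE_SIZE      -> %#lx (still unaligned)\n",
		       addr + PAGE_SIZE);
		printf("round_up(addr + 1, ..) -> %#lx (next page boundary)\n",
		       round_up_pow2(addr + 1, PAGE_SIZE));
		return 0;
	}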
@@ -678,12 +678,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
	return count;
 }

-static const struct device_attribute alarm_attr = {
+static struct device_attribute alarm_attr = {
	.attr = {.name = "alarm", .mode = 0644},
	.show = acpi_battery_alarm_show,
	.store = acpi_battery_alarm_store,
 };

+static struct attribute *acpi_battery_attrs[] = {
+	&alarm_attr.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(acpi_battery);
+
 /*
  * The Battery Hooking API
  *

@@ -823,7 +829,10 @@ static void __exit battery_hook_exit(void)

 static int sysfs_add_battery(struct acpi_battery *battery)
 {
-	struct power_supply_config psy_cfg = { .drv_data = battery, };
+	struct power_supply_config psy_cfg = {
+		.drv_data = battery,
+		.attr_grp = acpi_battery_groups,
+	};
	bool full_cap_broken = false;

	if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&

@@ -868,7 +877,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
		return result;
	}
	battery_hook_add_battery(battery);
-	return device_create_file(&battery->bat->dev, &alarm_attr);
+	return 0;
 }

 static void sysfs_remove_battery(struct acpi_battery *battery)

@@ -879,7 +888,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
		return;
	}
	battery_hook_remove_battery(battery);
-	device_remove_file(&battery->bat->dev, &alarm_attr);
	power_supply_unregister(battery->bat);
	battery->bat = NULL;
	mutex_unlock(&battery->sysfs_lock);
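For reference, a generic kernel-style sketch (the "example"/"alarm" names here are hypothetical) of the attribute-group pattern the battery driver is converted to above: declaring the attribute up front and letting the core register it via ->attr_grp avoids the window where the device exists without its sysfs file:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t alarm_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 0);	/* placeholder value */
	}

	static ssize_t alarm_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	{
		return count;				/* accept and ignore */
	}
	static DEVICE_ATTR_RW(alarm);

	static struct attribute *example_attrs[] = {
		&dev_attr_alarm.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(example);	/* emits example_groups for registration */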
@@ -77,7 +77,6 @@ struct acpi_battery {
	u16 spec;
	u8 id;
	u8 present:1;
-	u8 have_sysfs_alarm:1;
 };

 #define to_acpi_battery(x) power_supply_get_drvdata(x)

@@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
	return count;
 }

-static const struct device_attribute alarm_attr = {
+static struct device_attribute alarm_attr = {
	.attr = {.name = "alarm", .mode = 0644},
	.show = acpi_battery_alarm_show,
	.store = acpi_battery_alarm_store,
 };

+static struct attribute *acpi_battery_attrs[] = {
+	&alarm_attr.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(acpi_battery);
+
 /* --------------------------------------------------------------------------
                              Driver Interface
    -------------------------------------------------------------------------- */

@@ -518,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery)
 static int acpi_battery_add(struct acpi_sbs *sbs, int id)
 {
	struct acpi_battery *battery = &sbs->battery[id];
-	struct power_supply_config psy_cfg = { .drv_data = battery, };
+	struct power_supply_config psy_cfg = {
+		.drv_data = battery,
+		.attr_grp = acpi_battery_groups,
+	};
	int result;

	battery->id = id;

@@ -548,10 +556,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
		goto end;
	}

-	result = device_create_file(&battery->bat->dev, &alarm_attr);
-	if (result)
-		goto end;
-	battery->have_sysfs_alarm = 1;
 end:
	pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n",
		ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),

@@ -563,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
 {
	struct acpi_battery *battery = &sbs->battery[id];

-	if (battery->bat) {
-		if (battery->have_sysfs_alarm)
-			device_remove_file(&battery->bat->dev, &alarm_attr);
+	if (battery->bat)
		power_supply_unregister(battery->bat);
-	}
 }

 static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -25,6 +25,7 @@
 #include <linux/mutex.h>
 #include <linux/pm_runtime.h>
 #include <linux/netdevice.h>
+#include <linux/rcupdate.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/string_helpers.h>

@@ -2565,6 +2566,7 @@ static const char *dev_uevent_name(const struct kobject *kobj)
 static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
 {
	const struct device *dev = kobj_to_dev(kobj);
+	struct device_driver *driver;
	int retval = 0;

	/* add device node properties if present */

@@ -2593,8 +2595,12 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

-	if (dev->driver)
-		add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+	/* Synchronize with module_remove_driver() */
+	rcu_read_lock();
+	driver = READ_ONCE(dev->driver);
+	if (driver)
+		add_uevent_var(env, "DRIVER=%s", driver->name);
+	rcu_read_unlock();

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

@@ -2664,11 +2670,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
	if (!env)
		return -ENOMEM;

-	/* Synchronize with really_probe() */
-	device_lock(dev);
	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(&dev->kobj, env);
-	device_unlock(dev);
	if (retval)
		goto out;
@@ -7,6 +7,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/rcupdate.h>
 #include "base.h"

 static char *make_driver_name(struct device_driver *drv)

@@ -97,6 +98,9 @@ void module_remove_driver(struct device_driver *drv)
	if (!drv)
		return;

+	/* Synchronize with dev_uevent() */
+	synchronize_rcu();
+
	sysfs_remove_link(&drv->p->kobj, "module");

	if (drv->owner)
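A simplified sketch (assumption: not the driver-core code itself, and the "thing" names are hypothetical) of the publish/retire pairing used across the two hunks above: readers sample the pointer under rcu_read_lock(), and the remover waits for all such readers with synchronize_rcu() before tearing the object down:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/printk.h>

	struct thing { const char *name; };

	static struct thing *current_thing;	/* hypothetical shared pointer */

	void reader_report(void)
	{
		struct thing *t;

		rcu_read_lock();
		t = READ_ONCE(current_thing);	/* may be NULL or about to go */
		if (t)
			pr_info("thing: %s\n", t->name); /* safe under the lock */
		rcu_read_unlock();
	}

	void updater_retire(struct thing *old)
	{
		WRITE_ONCE(current_thing, NULL);
		synchronize_rcu();	/* wait out every reader_report() */
		kfree(old);		/* now nobody can still see 'old' */
	}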
@@ -340,7 +340,7 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
	struct ps_data *psdata = &nxpdev->psdata;

	flush_work(&psdata->work);
-	del_timer_sync(&psdata->ps_timer);
+	timer_shutdown_sync(&psdata->ps_timer);
 }

 static void ps_control(struct hci_dev *hdev, u8 ps_state)
@@ -528,6 +528,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
 static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
 {
	struct sh_cmt_channel *ch = dev_id;
+	unsigned long flags;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &

@@ -558,6 +559,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)

	ch->flags &= ~FLAG_SKIPEVENT;

+	raw_spin_lock_irqsave(&ch->lock, flags);
+
	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

@@ -570,6 +573,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)

	ch->flags &= ~FLAG_IRQCONTEXT;

+	raw_spin_unlock_irqrestore(&ch->lock, flags);
+
	return IRQ_HANDLED;
 }

@@ -780,12 +785,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
 {
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
+	unsigned long flags;

	BUG_ON(!clockevent_state_oneshot(ced));
+
+	raw_spin_lock_irqsave(&ch->lock, flags);
+
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
-		sh_cmt_set_next(ch, delta - 1);
+		__sh_cmt_set_next(ch, delta - 1);
+
+	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return 0;
 }
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/seq_file.h>

@@ -164,7 +165,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc,
	if (hwnum >= gdev->ngpio)
		return ERR_PTR(-EINVAL);

-	return &gdev->descs[hwnum];
+	return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)];
 }
 EXPORT_SYMBOL_GPL(gpiochip_get_desc);
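A self-contained sketch (illustrative, not the gpiolib code) of the Spectre-v1 pattern fixed above: after a bounds check the CPU may mispredict, array_index_nospec() clamps the index so a speculative out-of-bounds load cannot happen:

	#include <linux/nospec.h>

	#define TABLE_SIZE 16

	static int table[TABLE_SIZE];

	int read_entry(unsigned long idx)
	{
		if (idx >= TABLE_SIZE)
			return -EINVAL;

		/*
		 * Even if the branch above is speculated the wrong way, the
		 * clamped index becomes 0 rather than attacker-controlled.
		 */
		idx = array_index_nospec(idx, TABLE_SIZE);
		return table[idx];
	}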
@@ -3561,6 +3561,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
+	mutex_init(&adev->virt.rlcg_reg_lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);
@@ -258,9 +258,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
	struct dma_fence *fence = NULL;
	int r;

-	/* Ignore soft recovered fences here */
	r = drm_sched_entity_error(s_entity);
-	if (r && r != -ENODATA)
+	if (r)
		goto error;

	if (!fence && job->gang_submit)
@@ -334,7 +334,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size

	set_ta_context_funcs(psp, ta_type, &context);

-	if (!context->initialized) {
+	if (!context || !context->initialized) {
		dev_err(adev->dev, "TA is not initialized\n");
		ret = -EINVAL;
		goto err_free_shared_buf;
@@ -1785,12 +1785,15 @@ static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
 {
-	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
-	struct ras_ih_data *data = &obj->ih_data;
+	struct ras_manager *obj;
+	struct ras_ih_data *data;
+
+	obj = amdgpu_ras_find_obj(adev, &info->head);
+	if (!obj)
+		return -EINVAL;
+
+	data = &obj->ih_data;

	if (data->inuse == 0)
		return 0;
@@ -1003,6 +1003,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+
+	mutex_lock(&adev->virt.rlcg_reg_lock);
+
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

@@ -1058,6 +1061,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
	}

	ret = readl(scratch_reg0);
+
+	mutex_unlock(&adev->virt.rlcg_reg_lock);
+
	return ret;
 }
@@ -263,6 +263,8 @@ struct amdgpu_virt {

	/* the ucode id to signal the autoload */
	uint32_t autoload_ucode_id;
+
+	struct mutex rlcg_reg_lock;
 };

 struct amdgpu_video_codec_info;
@@ -102,6 +102,11 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
	if (!r)
		r = amdgpu_sync_push_to_job(&sync, p->job);
	amdgpu_sync_free(&sync);
+
+	if (r) {
+		p->num_dw_left = 0;
+		amdgpu_job_free(p->job);
+	}
	return r;
 }
@@ -2628,7 +2628,8 @@ static int dm_suspend(void *handle)

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

-		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+		if (dm->cached_dc_state)
+			dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

@@ -6483,7 +6484,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
				      aconnector->dc_link->local_sink :
				      aconnector->dc_em_sink;
-		dc_sink_retain(aconnector->dc_sink);
+		if (aconnector->dc_sink)
+			dc_sink_retain(aconnector->dc_sink);
	}
 }

@@ -7296,7 +7298,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
		drm_add_modes_noedid(connector, 1920, 1080);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
-		amdgpu_dm_connector_add_common_modes(encoder, connector);
+		if (encoder)
+			amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);
@@ -1266,6 +1266,9 @@ static bool is_dsc_need_re_compute(
		}
	}

+	if (new_stream_on_link_num == 0)
+		return false;
+
	/* check current_state if there stream on link but it is not in
	 * new request state
	 */
@@ -162,7 +162,12 @@ static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin
		link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
				link_res->hpo_dp_link_enc, tp_params);
	}
+
	link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+
+	// Give retimer extra time to lock before updating DP_TRAINING_PATTERN_SET to TPS1
+	if (tp_params->dp_phy_pattern == DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE)
+		msleep(30);
 }

 static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
@@ -927,7 +927,7 @@ static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
 {
	struct pp_hwmgr *hwmgr = handle;
-	long workload;
+	long workload[1];
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)

@@ -945,12 +945,12 @@ static int pp_dpm_switch_power_profile(void *handle,
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
-		workload = hwmgr->workload_setting[index];
+		workload[0] = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
-		workload = hwmgr->workload_setting[index];
+		workload[0] = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&

@@ -960,7 +960,7 @@ static int pp_dpm_switch_power_profile(void *handle,
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);

	return 0;
 }
@@ -269,7 +269,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
		struct pp_power_state *new_ps)
 {
	uint32_t index;
-	long workload;
+	long workload[1];

	if (hwmgr->not_vf) {
		if (!skip_display_settings)

@@ -294,10 +294,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
-		workload = hwmgr->workload_setting[index];
+		workload[0] = hwmgr->workload_setting[index];

-		if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
-			hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+		if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode)
+			hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
	}

	return 0;
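A small standalone sketch (mirrors the pattern above rather than the driver itself) of the fls()-based selection used in these hunks: the highest set bit of the workload mask picks the highest-priority profile, and index 0 is the fallback when the mask is empty:

	#include <stdio.h>

	#define POLICY_MAX 8

	/* Same semantics as the kernel's fls(): 1-based highest set bit, 0 if none. */
	static int fls_u(unsigned int x)
	{
		int pos = 0;

		while (x) {
			pos++;
			x >>= 1;
		}
		return pos;
	}

	static int pick_profile(unsigned int workload_mask)
	{
		int index = fls_u(workload_mask);

		return (index > 0 && index <= POLICY_MAX) ? index - 1 : 0;
	}

	int main(void)
	{
		printf("mask 0b10100 -> profile %d\n", pick_profile(0x14)); /* 4 */
		printf("mask 0       -> profile %d\n", pick_profile(0));    /* 0 */
		return 0;
	}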
@@ -2957,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)

 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
	struct smu7_hwmgr *data;
	int result = 0;

@@ -2993,40 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
	/* Initalize Dynamic State Adjustment Rule Settings */
	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);

-	if (0 == result) {
-		struct amdgpu_device *adev = hwmgr->adev;
+	if (result)
+		goto fail;

-		data->is_tlu_enabled = false;
+	data->is_tlu_enabled = false;

-		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
							SMU7_MAX_HARDWARE_POWERLEVELS;
-		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
-		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

-		data->pcie_gen_cap = adev->pm.pcie_gen_mask;
-		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-			data->pcie_spc_cap = 20;
-		else
-			data->pcie_spc_cap = 16;
-		data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
+	data->pcie_gen_cap = adev->pm.pcie_gen_mask;
+	if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		data->pcie_spc_cap = 20;
+	else
+		data->pcie_spc_cap = 16;
+	data->pcie_lane_cap = adev->pm.pcie_mlw_mask;

-		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
-		/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
-		hwmgr->platform_descriptor.clockStep.engineClock = 500;
-		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
-		smu7_thermal_parameter_init(hwmgr);
-	} else {
-		/* Ignore return value in here, we are cleaning up a mess. */
-		smu7_hwmgr_backend_fini(hwmgr);
-	}
+	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+	hwmgr->platform_descriptor.clockStep.engineClock = 500;
+	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+	smu7_thermal_parameter_init(hwmgr);

	result = smu7_update_edc_leakage_table(hwmgr);
-	if (result) {
-		smu7_hwmgr_backend_fini(hwmgr);
-		return result;
-	}
+	if (result)
+		goto fail;

	return 0;
+fail:
+	smu7_hwmgr_backend_fini(hwmgr);
+	return result;
 }

 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)

@@ -3316,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
		const struct pp_power_state *current_ps)
 {
	struct amdgpu_device *adev = hwmgr->adev;
-	struct smu7_power_state *smu7_ps =
-			cast_phw_smu7_power_state(&request_ps->hardware);
+	struct smu7_power_state *smu7_ps;
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};

@@ -3334,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
	uint32_t latency;
	bool latency_allowed = false;

+	smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware);
+	if (!smu7_ps)
+		return -EINVAL;
+
	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);
	data->mclk_ignore_signal = false;
@@ -1065,16 +1065,18 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
 {
-	struct smu8_power_state *smu8_ps =
-			cast_smu8_power_state(&prequest_ps->hardware);
-
-	const struct smu8_power_state *smu8_current_ps =
-			cast_const_smu8_power_state(&pcurrent_ps->hardware);
-
+	struct smu8_power_state *smu8_ps;
+	const struct smu8_power_state *smu8_current_ps;
	struct smu8_hwmgr *data = hwmgr->backend;
	struct PP_Clocks clocks = {0, 0, 0, 0};
	bool force_high;

+	smu8_ps = cast_smu8_power_state(&prequest_ps->hardware);
+	smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware);
+
+	if (!smu8_ps || !smu8_current_ps)
+		return -EINVAL;
+
	smu8_ps->need_dfs_bypass = true;

	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
@@ -3259,8 +3259,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
		const struct pp_power_state *current_ps)
 {
	struct amdgpu_device *adev = hwmgr->adev;
-	struct vega10_power_state *vega10_ps =
-			cast_phw_vega10_power_state(&request_ps->hardware);
+	struct vega10_power_state *vega10_ps;
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};

@@ -3278,6 +3277,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
	uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
	uint32_t latency;

+	vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware);
+	if (!vega10_ps)
+		return -EINVAL;
+
	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);

@@ -3415,13 +3418,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
-	uint32_t sclk = vega10_ps->performance_levels
-			[vega10_ps->performance_level_count - 1].gfx_clock;
	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
-	uint32_t mclk = vega10_ps->performance_levels
-			[vega10_ps->performance_level_count - 1].mem_clock;
+	uint32_t sclk, mclk;
	uint32_t i;

+	if (vega10_ps == NULL)
+		return -EINVAL;
+
+	sclk = vega10_ps->performance_levels
+			[vega10_ps->performance_level_count - 1].gfx_clock;
+	mclk = vega10_ps->performance_levels
+			[vega10_ps->performance_level_count - 1].mem_clock;
+
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;

@@ -3728,6 +3735,9 @@ static int vega10_generate_dpm_level_enable_mask(
			cast_const_phw_vega10_power_state(states->pnew_state);
	int i;

+	if (vega10_ps == NULL)
+		return -EINVAL;
+
	PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
		"Attempt to Trim DPM States Failed!",
		return -1);

@@ -4995,6 +5005,8 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,

	vega10_psa = cast_const_phw_vega10_power_state(pstate1);
	vega10_psb = cast_const_phw_vega10_power_state(pstate2);
+	if (vega10_psa == NULL || vega10_psb == NULL)
+		return -EINVAL;

	/* If the two states don't even have the same number of performance levels
	 * they cannot be the same state.

@@ -5128,6 +5140,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+	if (vega10_ps == NULL)
+		return -EINVAL;

	vega10_ps->performance_levels
	[vega10_ps->performance_level_count - 1].gfx_clock =

@@ -5179,6 +5193,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+	if (vega10_ps == NULL)
+		return -EINVAL;

	vega10_ps->performance_levels
	[vega10_ps->performance_level_count - 1].mem_clock =

@@ -5420,6 +5436,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
		return;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+	if (vega10_ps == NULL)
+		return;
+
	max_level = vega10_ps->performance_level_count - 1;

	if (vega10_ps->performance_levels[max_level].gfx_clock !=

@@ -5442,6 +5461,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)

	ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+	if (vega10_ps == NULL)
+		return;
+
	max_level = vega10_ps->performance_level_count - 1;

	if (vega10_ps->performance_levels[max_level].gfx_clock !=

@@ -5632,6 +5654,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
		return -EINVAL;

	vega10_ps = cast_const_phw_vega10_power_state(state);
+	if (vega10_ps == NULL)
+		return -EINVAL;

	i = index > vega10_ps->performance_level_count - 1 ?
			vega10_ps->performance_level_count - 1 : index;
@@ -1846,7 +1846,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 {
	int ret = 0;
	int index = 0;
-	long workload;
+	long workload[1];
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {

@@ -1886,10 +1886,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload = smu->workload_setting[index];
+		workload[0] = smu->workload_setting[index];

-		if (smu->power_profile_mode != workload)
-			smu_bump_power_profile_mode(smu, &workload, 0);
+		if (smu->power_profile_mode != workload[0])
+			smu_bump_power_profile_mode(smu, workload, 0);
	}

	return ret;

@@ -1939,7 +1939,7 @@ static int smu_switch_power_profile(void *handle,
 {
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	long workload;
+	long workload[1];
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)

@@ -1952,17 +1952,17 @@ static int smu_switch_power_profile(void *handle,
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload = smu->workload_setting[index];
+		workload[0] = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload = smu->workload_setting[index];
+		workload[0] = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
-		smu_bump_power_profile_mode(smu, &workload, 0);
+		smu_bump_power_profile_mode(smu, workload, 0);

	return 0;
 }
@@ -1027,7 +1027,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
	u32 status_reg;
	u8 *buffer = msg->buffer;
	unsigned int i;
-	int num_transferred = 0;
	int ret;

	/* Buffer size of AUX CH is 16 bytes */

@@ -1079,7 +1078,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
			reg = buffer[i];
			writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
			       4 * i);
-			num_transferred++;
		}
	}

@@ -1127,7 +1125,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
			reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
				    4 * i);
			buffer[i] = (unsigned char)reg;
-			num_transferred++;
		}
	}

@@ -1144,7 +1141,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
	    (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
		msg->reply = DP_AUX_NATIVE_REPLY_ACK;

-	return num_transferred > 0 ? num_transferred : -EBUSY;
+	return msg->size;

 aux_error:
	/* if aux err happen, reset aux */
@@ -4024,6 +4024,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
		const struct drm_dp_connection_status_notify *conn_stat =
			&up_req->msg.u.conn_stat;
+		bool handle_csn;

		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
			    conn_stat->port_number,

@@ -4032,6 +4033,16 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
			    conn_stat->message_capability_status,
			    conn_stat->input_port,
			    conn_stat->peer_device_type);

+		mutex_lock(&mgr->probe_lock);
+		handle_csn = mgr->mst_primary->link_address_sent;
+		mutex_unlock(&mgr->probe_lock);
+
+		if (!handle_csn) {
+			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
+			kfree(up_req);
+			goto out;
+		}
	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
		const struct drm_dp_resource_status_notify *res_stat =
			&up_req->msg.u.resource_stat;
@@ -869,6 +869,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,

		kfree(modeset->mode);
		modeset->mode = drm_mode_duplicate(dev, mode);
+		if (!modeset->mode) {
+			ret = -ENOMEM;
+			break;
+		}
+
		drm_connector_get(connector);
		modeset->connectors[modeset->num_connectors++] = connector;
		modeset->x = offset->x;
@@ -290,6 +290,41 @@ out:
	return i915_error_to_vmf_fault(err);
 }

+static void set_address_limits(struct vm_area_struct *area,
+			       struct i915_vma *vma,
+			       unsigned long obj_offset,
+			       unsigned long *start_vaddr,
+			       unsigned long *end_vaddr)
+{
+	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+	long start, end; /* memory boundaries */
+
+	/*
+	 * Let's move into the ">> PAGE_SHIFT"
+	 * domain to be sure not to lose bits
+	 */
+	vm_start = area->vm_start >> PAGE_SHIFT;
+	vm_end = area->vm_end >> PAGE_SHIFT;
+	vma_size = vma->size >> PAGE_SHIFT;
+
+	/*
+	 * Calculate the memory boundaries by considering the offset
+	 * provided by the user during memory mapping and the offset
+	 * provided for the partial mapping.
+	 */
+	start = vm_start;
+	start -= obj_offset;
+	start += vma->gtt_view.partial.offset;
+	end = start + vma_size;
+
+	start = max_t(long, start, vm_start);
+	end = min_t(long, end, vm_end);
+
+	/* Let's move back into the "<< PAGE_SHIFT" domain */
+	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
+	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)

@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
+	unsigned long obj_offset;
+	unsigned long start, end; /* memory boundaries */
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
+	unsigned long pfn;
	int srcu;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
+	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+	page_offset += obj_offset;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

@@ -402,12 +441,14 @@ retry:
	if (ret)
		goto err_unpin;

+	set_address_limits(area, vma, obj_offset, &start, &end);
+
+	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+	pfn += (start - area->vm_start) >> PAGE_SHIFT;
+	pfn += obj_offset - vma->gtt_view.partial.offset;
+
	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
-			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
-			       &ggtt->iomap);
+	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
	if (ret)
		goto err_fence;

@@ -1088,6 +1129,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
		mmo = mmap_offset_attach(obj, mmap_type, NULL);
		if (IS_ERR(mmo))
			return PTR_ERR(mmo);
+
+		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
	}

	/*
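A worked example (hypothetical numbers, not from the diff) of the clamping set_address_limits() performs for a partial mapping; all arithmetic happens in page units so the final addresses stay page-aligned:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long vm_start = 0x7f0000000000UL >> PAGE_SHIFT;
		unsigned long vm_end   = 0x7f0000100000UL >> PAGE_SHIFT; /* 256 pages */
		unsigned long vma_size = 128;	/* GTT vma covers 128 pages */
		unsigned long obj_offset = 32;	/* mmap started 32 pages into object */
		unsigned long partial_offset = 16; /* vma starts 16 pages into object */

		long start = (long)vm_start - obj_offset + partial_offset;
		long end = start + vma_size;

		/* Clamp to the user's actual mapping window. */
		if (start < (long)vm_start)
			start = vm_start;
		if (end > (long)vm_end)
			end = vm_end;

		printf("user vaddr window: [%#lx, %#lx)\n",
		       (unsigned long)start << PAGE_SHIFT,
		       (unsigned long)end << PAGE_SHIFT);
		return 0;
	}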
@@ -486,3 +486,4 @@ module_platform_driver(lima_platform_driver);
 MODULE_AUTHOR("Lima Project Developers");
 MODULE_DESCRIPTION("Lima DRM Driver");
 MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
@@ -31,6 +31,8 @@
 #include <linux/i2c.h>
 #include <linux/pci.h>

+#include <drm/drm_managed.h>
+
 #include "mgag200_drv.h"

 static int mga_i2c_read_gpio(struct mga_device *mdev)

@@ -86,7 +88,7 @@ static int mga_gpio_getscl(void *data)
	return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
 }

-static void mgag200_i2c_release(void *res)
+static void mgag200_i2c_release(struct drm_device *dev, void *res)
 {
	struct mga_i2c_chan *i2c = res;

@@ -115,7 +117,7 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
	i2c->adapter.algo_data = &i2c->bit;

	i2c->bit.udelay = 10;
-	i2c->bit.timeout = 2;
+	i2c->bit.timeout = usecs_to_jiffies(2200);
	i2c->bit.data = i2c;
	i2c->bit.setsda = mga_gpio_setsda;
	i2c->bit.setscl = mga_gpio_setscl;

@@ -126,5 +128,5 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
	if (ret)
		return ret;

-	return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c);
+	return drmm_add_action_or_reset(dev, mgag200_i2c_release, i2c);
 }
@@ -1320,6 +1320,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)

		drm_gpuva_for_each_op(va_op, op->ops) {
			struct drm_gem_object *obj = op_gem_obj(va_op);
+			struct nouveau_bo *nvbo;

			if (unlikely(!obj))
				continue;

@@ -1330,8 +1331,9 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
			if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
				continue;

-			ret = nouveau_bo_validate(nouveau_gem_object(obj),
-						  true, false);
+			nvbo = nouveau_gem_object(obj);
+			nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
+			ret = nouveau_bo_validate(nvbo, true, false);
			if (ret) {
				op = list_last_op(&bind_job->ops);
				goto unwind;
@@ -439,7 +439,7 @@ typedef struct _StateArray{
     //how many states we have
     UCHAR ucNumEntries;

-    ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
+    ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
 }StateArray;
@@ -875,15 +875,16 @@ static const struct hid_device_id corsairpsu_idtable[] = {
	{ HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
	{ HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */
	{ HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */
-	{ HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Series 2022 */
-	{ HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */
+	{ HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Legacy */
+	{ HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i Legacy */
	{ HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */
	{ HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */
	{ HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */
	{ HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */
	{ HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */
	{ HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i Series 2023 */
-	{ HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Series 2022 and 2023 */
+	{ HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Legacy and Series 2023 */
+	{ HID_USB_DEVICE(0x1b1c, 0x1c23) }, /* Corsair HX1200i Series 2023 */
	{ },
 };
 MODULE_DEVICE_TABLE(hid, corsairpsu_idtable);
@ -991,8 +991,11 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
return ret;

ret = geni_se_resources_on(&gi2c->se);
if (ret)
if (ret) {
clk_disable_unprepare(gi2c->core_clk);
geni_icc_disable(&gi2c->se);
return ret;
}

enable_irq(gi2c->irq);
gi2c->suspended = 0;

@ -34,6 +34,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
struct i2c_client *client = i2c_verify_client(dev);
struct alert_data *data = addrp;
struct i2c_driver *driver;
int ret;

if (!client || client->addr != data->addr)
return 0;

@ -47,16 +48,47 @@ static int smbus_do_alert(struct device *dev, void *addrp)
device_lock(dev);
if (client->dev.driver) {
driver = to_i2c_driver(client->dev.driver);
if (driver->alert)
if (driver->alert) {
/* Stop iterating after we find the device */
driver->alert(client, data->type, data->data);
else
ret = -EBUSY;
} else {
dev_warn(&client->dev, "no driver alert()!\n");
} else
ret = -EOPNOTSUPP;
}
} else {
dev_dbg(&client->dev, "alert with no driver\n");
ret = -ENODEV;
}
device_unlock(dev);

/* Stop iterating after we find the device */
return -EBUSY;
return ret;
}

/* Same as above, but call back all drivers with alert handler */

static int smbus_do_alert_force(struct device *dev, void *addrp)
{
struct i2c_client *client = i2c_verify_client(dev);
struct alert_data *data = addrp;
struct i2c_driver *driver;

if (!client || (client->flags & I2C_CLIENT_TEN))
return 0;

/*
* Drivers should either disable alerts, or provide at least
* a minimal handler. Lock so the driver won't change.
*/
device_lock(dev);
if (client->dev.driver) {
driver = to_i2c_driver(client->dev.driver);
if (driver->alert)
driver->alert(client, data->type, data->data);
}
device_unlock(dev);

return 0;
}

/*

@ -67,6 +99,7 @@ static irqreturn_t smbus_alert(int irq, void *d)
{
struct i2c_smbus_alert *alert = d;
struct i2c_client *ara;
unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */

ara = alert->ara;

@ -94,8 +127,25 @@ static irqreturn_t smbus_alert(int irq, void *d)
data.addr, data.data);

/* Notify driver for the device which issued the alert */
device_for_each_child(&ara->adapter->dev, &data,
smbus_do_alert);
status = device_for_each_child(&ara->adapter->dev, &data,
smbus_do_alert);
/*
* If we read the same address more than once, and the alert
* was not handled by a driver, it won't do any good to repeat
* the loop because it will never terminate. Try again, this
* time calling the alert handlers of all devices connected to
* the bus, and abort the loop afterwards. If this helps, we
* are all set. If it doesn't, there is nothing else we can do,
* so we might as well abort the loop.
* Note: This assumes that a driver with alert handler handles
* the alert properly and clears it if necessary.
*/
if (data.addr == prev_addr && status != -EBUSY) {
device_for_each_child(&ara->adapter->dev, &data,
smbus_do_alert_force);
break;
}
prev_addr = data.addr;
}

return IRQ_HANDLED;

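A minimal standalone sketch of the loop-termination idea in the smbus_alert() hunk above: if the same unhandled address comes back twice, do one forced pass over all handlers and stop. The helper names (poll_alert, notify_driver, notify_all_drivers) are hypothetical stand-ins for the kernel calls, not real API.

#include <stdio.h>

#define ADDR_END 0xffffu                       /* sentinel: no alert pending */

/* Hypothetical stand-ins for the driver callbacks. */
static unsigned int poll_alert(void) { return 0x48; }          /* device stuck at 0x48 */
static int notify_driver(unsigned int addr) { (void)addr; return -1; /* unhandled */ }
static void notify_all_drivers(unsigned int addr)
{
	printf("forced handlers for 0x%02x\n", addr);
}

int main(void)
{
	unsigned int prev_addr = ADDR_END;

	for (;;) {
		unsigned int addr = poll_alert();
		int handled;

		if (addr == ADDR_END)
			break;                  /* alert cleared, done */

		handled = (notify_driver(addr) == 0);

		/* Same unhandled address twice: one forced pass, then stop. */
		if (addr == prev_addr && !handled) {
			notify_all_drivers(addr);
			break;
		}
		prev_addr = addr;
	}
	return 0;
}

Without the prev_addr check, a stuck device that keeps reasserting the alert would spin this loop forever; the sketch terminates on the second sighting.
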
@ -18,11 +18,13 @@ struct fwnode_handle *cpuintc_handle;

static u32 lpic_gsi_to_irq(u32 gsi)
{
int irq = 0;

/* Only pch irqdomain transferring is required for LoongArch. */
if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);

return 0;
return (irq > 0) ? irq : 0;
}

static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)

@ -64,6 +64,20 @@ struct mbigen_device {
void __iomem *base;
};

static inline unsigned int get_mbigen_node_offset(unsigned int nid)
{
unsigned int offset = nid * MBIGEN_NODE_OFFSET;

/*
* To avoid touched clear register in unexpected way, we need to directly
* skip clear register when access to more than 10 mbigen nodes.
*/
if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
offset += MBIGEN_NODE_OFFSET;

return offset;
}

static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
{
unsigned int nid, pin;

@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
pin = hwirq % IRQS_PER_MBIGEN_NODE;

return pin * 4 + nid * MBIGEN_NODE_OFFSET
+ REG_MBIGEN_VEC_OFFSET;
return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
}

static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,

@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
*mask = 1 << (irq_ofst % 32);
ofst = irq_ofst / 32 * 4;

*addr = ofst + nid * MBIGEN_NODE_OFFSET
+ REG_MBIGEN_TYPE_OFFSET;
*addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
}

static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,

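A standalone sketch of the register-offset arithmetic introduced by get_mbigen_node_offset() above. The constants mirror the driver's apparent layout (0x1000 bytes per node, clear registers occupying the slot at 0xa000); treat the exact values as illustrative assumptions.

#include <stdio.h>

#define MBIGEN_NODE_OFFSET      0x1000u
#define REG_MBIGEN_CLEAR_OFFSET 0xa000u
#define REG_MBIGEN_VEC_OFFSET   0x200u
#define IRQS_PER_MBIGEN_NODE    128u

/* Nodes at or past the clear-register block shift up one slot. */
static unsigned int node_offset(unsigned int nid)
{
	unsigned int offset = nid * MBIGEN_NODE_OFFSET;

	if (nid >= REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET)
		offset += MBIGEN_NODE_OFFSET;
	return offset;
}

int main(void)
{
	unsigned long hwirq = 64 + 9 * IRQS_PER_MBIGEN_NODE; /* pin 64, tenth node */
	unsigned int nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
	unsigned int pin = hwirq % IRQS_PER_MBIGEN_NODE;

	/* With the skip, nid 10 lands at 0xb000 instead of clobbering 0xa000. */
	printf("vec reg @ 0x%x\n", pin * 4 + node_offset(nid) + REG_MBIGEN_VEC_OFFSET);
	return 0;
}
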
@ -173,7 +173,7 @@ struct meson_gpio_irq_controller {
void __iomem *base;
u32 channel_irqs[MAX_NUM_CHANNEL];
DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
spinlock_t lock;
raw_spinlock_t lock;
};

static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,

@ -182,14 +182,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
unsigned long flags;
u32 tmp;

spin_lock_irqsave(&ctl->lock, flags);
raw_spin_lock_irqsave(&ctl->lock, flags);

tmp = readl_relaxed(ctl->base + reg);
tmp &= ~mask;
tmp |= val;
writel_relaxed(tmp, ctl->base + reg);

spin_unlock_irqrestore(&ctl->lock, flags);
raw_spin_unlock_irqrestore(&ctl->lock, flags);
}

static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)

@ -239,12 +239,12 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
unsigned long flags;
unsigned int idx;

spin_lock_irqsave(&ctl->lock, flags);
raw_spin_lock_irqsave(&ctl->lock, flags);

/* Find a free channel */
idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
if (idx >= ctl->params->nr_channels) {
spin_unlock_irqrestore(&ctl->lock, flags);
raw_spin_unlock_irqrestore(&ctl->lock, flags);
pr_err("No channel available\n");
return -ENOSPC;
}

@ -252,7 +252,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
/* Mark the channel as used */
set_bit(idx, ctl->channel_map);

spin_unlock_irqrestore(&ctl->lock, flags);
raw_spin_unlock_irqrestore(&ctl->lock, flags);

/*
* Setup the mux of the channel to route the signal of the pad

@ -562,7 +562,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
if (!ctl)
return -ENOMEM;

spin_lock_init(&ctl->lock);
raw_spin_lock_init(&ctl->lock);

ctl->base = of_iomap(node, 0);
if (!ctl->base) {

@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}

if (irqc->intr_mask >> irqc->nr_irq)
if ((u64)irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");

pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",

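Why the (u64) cast above matters, as a runnable sketch: shifting a 32-bit value by 32 or more is undefined behaviour in C, so with nr_irq == 32 the old check was unreliable; widening first makes the shift well defined.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t intr_mask = 0xffffffffu;   /* all 32 interrupts marked */
	uint32_t nr_irq = 32;

	/* (intr_mask >> nr_irq) would be UB here; the widened form is fine
	 * and correctly yields 0 for a consistent mask. */
	if ((uint64_t)intr_mask >> nr_irq)
		printf("mismatch in kind-of-intr param\n");
	else
		printf("mask consistent with %u interrupts\n", (unsigned)nr_irq);
	return 0;
}
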
@ -473,7 +473,6 @@ void mddev_suspend(struct mddev *mddev)
clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

del_timer_sync(&mddev->safemode_timer);
/* restrict memory reclaim I/O during raid array is suspend */
mddev->noio_flag = memalloc_noio_save();
}

@ -6326,7 +6326,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
safepos = conf->reshape_safe;
sector_div(safepos, data_disks);
if (mddev->reshape_backwards) {
BUG_ON(writepos < reshape_sectors);
if (WARN_ON(writepos < reshape_sectors))
return MaxSector;

writepos -= reshape_sectors;
readpos += reshape_sectors;
safepos += reshape_sectors;

@ -6344,14 +6346,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
* to set 'stripe_addr' which is where we will write to.
*/
if (mddev->reshape_backwards) {
BUG_ON(conf->reshape_progress == 0);
if (WARN_ON(conf->reshape_progress == 0))
return MaxSector;

stripe_addr = writepos;
BUG_ON((mddev->dev_sectors &
~((sector_t)reshape_sectors - 1))
- reshape_sectors - stripe_addr
!= sector_nr);
if (WARN_ON((mddev->dev_sectors &
~((sector_t)reshape_sectors - 1)) -
reshape_sectors - stripe_addr != sector_nr))
return MaxSector;
} else {
BUG_ON(writepos != sector_nr + reshape_sectors);
if (WARN_ON(writepos != sector_nr + reshape_sectors))
return MaxSector;

stripe_addr = sector_nr;
}

@ -195,7 +195,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
struct vdec_t *vdec = inst->priv;
int ret = 0;

vpu_inst_lock(inst);
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
vdec->params.display_delay_enable = ctrl->val;

@ -207,7 +206,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
ret = -EINVAL;
break;
}
vpu_inst_unlock(inst);

return ret;
}

@ -518,7 +518,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
struct venc_t *venc = inst->priv;
int ret = 0;

vpu_inst_lock(inst);
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
venc->params.profile = ctrl->val;

@ -579,7 +578,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
ret = -EINVAL;
break;
}
vpu_inst_unlock(inst);

return ret;
}

@ -1361,9 +1361,16 @@ static void load_firmware_cb(const struct firmware *fw,
void *context)
{
struct dvb_frontend *fe = context;
struct xc2028_data *priv = fe->tuner_priv;
struct xc2028_data *priv;
int rc;

if (!fe) {
pr_warn("xc2028: No frontend in %s\n", __func__);
return;
}

priv = fe->tuner_priv;

tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error");
if (!fw) {
tuner_err("Could not load firmware %s.\n", priv->fname);

@ -214,13 +214,13 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
* Compute a bandwidth estimation by multiplying the frame
* size by the number of video frames per second, divide the
* result by the number of USB frames (or micro-frames for
* high-speed devices) per second and add the UVC header size
* (assumed to be 12 bytes long).
* high- and super-speed devices) per second and add the UVC
* header size (assumed to be 12 bytes long).
*/
bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp;
bandwidth *= 10000000 / interval + 1;
bandwidth /= 1000;
if (stream->dev->udev->speed == USB_SPEED_HIGH)
if (stream->dev->udev->speed >= USB_SPEED_HIGH)
bandwidth /= 8;
bandwidth += 12;

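A worked instance of the bandwidth estimate above, under assumed stream parameters (1280x720 at 16 bits per pixel, a 333333 x 100ns frame interval, high-speed USB with 8 microframes per millisecond); the numbers are illustrative, not taken from the driver.

#include <stdio.h>

int main(void)
{
	unsigned int wWidth = 1280, wHeight = 720, bpp = 16;
	unsigned int interval = 333333;            /* 100 ns units, ~30 fps */
	unsigned int bandwidth;

	bandwidth = wWidth * wHeight / 8 * bpp;    /* bytes per frame */
	bandwidth *= 10000000 / interval + 1;      /* times frames per second */
	bandwidth /= 1000;                         /* per 1 ms USB frame */
	bandwidth /= 8;                            /* per microframe (high/super speed) */
	bandwidth += 12;                           /* UVC payload header */

	printf("estimated payload: %u bytes per microframe\n", bandwidth);
	return 0;
}

For these inputs the estimate comes out to 7154 bytes per microframe, which is what the driver would request as the alternate-setting bandwidth.
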
@ -478,6 +478,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
ktime_t time;
u16 host_sof;
u16 dev_sof;
u32 dev_stc;

switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) {
case UVC_STREAM_PTS | UVC_STREAM_SCR:

@ -526,6 +527,34 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
if (dev_sof == stream->clock.last_sof)
return;

dev_stc = get_unaligned_le32(&data[header_size - 6]);

/*
* STC (Source Time Clock) is the clock used by the camera. The UVC 1.5
* standard states that it "must be captured when the first video data
* of a video frame is put on the USB bus". This is generally understood
* as requiring devices to clear the payload header's SCR bit before
* the first packet containing video data.
*
* Most vendors follow that interpretation, but some (namely SunplusIT
* on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR
* field with 0's, and expect that the driver only processes the SCR if
* there is data in the packet.
*
* Ignore all the hardware timestamp information if we haven't received
* any data for this frame yet, the packet contains no data, and both
* STC and SOF are zero. This heuristic should be safe with compliant
* devices: in the very unlikely case where a UVC 1.1 device would send
* timing information only before the first packet containing data, and
* both STC and SOF happen to be zero for a particular frame, we would
* only miss one clock sample from many and the clock recovery algorithm
* wouldn't suffer from this condition.
*/
if (buf && buf->bytesused == 0 && len == header_size &&
dev_stc == 0 && dev_sof == 0)
return;

stream->clock.last_sof = dev_sof;

host_sof = usb_get_current_frame_number(stream->dev->udev);

@ -575,7 +604,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
spin_lock_irqsave(&stream->clock.lock, flags);

sample = &stream->clock.samples[stream->clock.head];
sample->dev_stc = get_unaligned_le32(&data[header_size - 6]);
sample->dev_stc = dev_stc;
sample->dev_sof = dev_sof;
sample->host_sof = host_sof;
sample->host_time = time;

@ -485,6 +485,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
}

tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
ilog2(tx_ring->obj_num);
tx_ring->obj_size = tx_obj_size;

rem = priv->rx_obj_num;

@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021 Pengutronix,
// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:

@ -16,6 +16,11 @@

#include "mcp251xfd.h"

static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta)
{
return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
}

static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
u8 *tef_tail)

@ -55,56 +60,39 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
return 0;
}

static int
mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
{
const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
u32 tef_sta;
int err;

err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
if (err)
return err;

if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
netdev_err(priv->ndev,
"Transmit Event FIFO buffer overflow.\n");
return -ENOBUFS;
}

netdev_info(priv->ndev,
"Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
"full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
"not empty" : "empty",
seq, priv->tef->tail, priv->tef->head, tx_ring->head);

/* The Sequence Number in the TEF doesn't match our tef_tail. */
return -EAGAIN;
}

static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
unsigned int *frame_len_ptr)
{
struct net_device_stats *stats = &priv->ndev->stats;
u32 seq, tef_tail_masked, tef_tail;
struct sk_buff *skb;
u32 seq, seq_masked, tef_tail_masked, tef_tail;

seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
/* Use the MCP2517FD mask on the MCP2518FD, too. We only
* compare 7 bits, this is enough to detect old TEF objects.
*/
seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK,
hw_tef_obj->flags);

/* Use the MCP2517FD mask on the MCP2518FD, too. We only
* compare 7 bits, this should be enough to detect
* net-yet-completed, i.e. old TEF objects.
*/
seq_masked = seq &
field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
tef_tail_masked = priv->tef->tail &
field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
if (seq_masked != tef_tail_masked)
return mcp251xfd_handle_tefif_recover(priv, seq);

/* According to mcp2518fd erratum DS80000789E 6. the FIFOCI
* bits of a FIFOSTA register, here the TX FIFO tail index
* might be corrupted and we might process past the TEF FIFO's
* head into old CAN frames.
*
* Compare the sequence number of the currently processed CAN
* frame with the expected sequence number. Abort with
* -EBADMSG if an old CAN frame is detected.
*/
if (seq != tef_tail_masked) {
netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__,
seq, tef_tail_masked);
stats->tx_fifo_errors++;

return -EBADMSG;
}

tef_tail = mcp251xfd_get_tef_tail(priv);
skb = priv->can.echo_skb[tef_tail];

@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
return 0;
}

static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
static int
mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
{
const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
unsigned int new_head;
u8 chip_tx_tail;
const u8 shift = tx_ring->obj_num_shift_to_u8;
u8 chip_tx_tail, tail, len;
u32 fifo_sta;
int err;

err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
&fifo_sta);
if (err)
return err;

/* chip_tx_tail, is the next TX-Object send by the HW.
* The new TEF head must be >= the old head, ...
if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) {
*len_p = tx_ring->obj_num;
return 0;
}

chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);

err = mcp251xfd_check_tef_tail(priv);
if (err)
return err;
tail = mcp251xfd_get_tef_tail(priv);

/* First shift to full u8. The subtraction works on signed
* values, that keeps the difference steady around the u8
* overflow. The right shift acts on len, which is an u8.
*/
new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
if (new_head <= priv->tef->head)
new_head += tx_ring->obj_num;
BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));

/* ... but it cannot exceed the TX head. */
priv->tef->head = min(new_head, tx_ring->head);
len = (chip_tx_tail << shift) - (tail << shift);
*len_p = len >> shift;

return mcp251xfd_check_tef_tail(priv);
return 0;
}

static inline int

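The shift-to-u8 trick in mcp251xfd_get_tef_len() above, isolated into a runnable sketch. With a power-of-two ring, shifting both indices into the top bits of a u8 makes the subtraction wrap naturally, so the distance is right even after the hardware index wraps. The ring size of 16 is an assumed example value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t obj_num = 16;          /* ring size, power of two */
	const uint8_t shift = 8 - 4;         /* BITS_PER_TYPE(u8) - ilog2(16) */
	uint8_t chip_tx_tail = 2;            /* hardware index, already wrapped */
	uint8_t tail = 14;                   /* driver's ring tail */
	uint8_t len;

	/* 0x20 - 0xE0 wraps to 0x40 in u8; shifted back down that is 4,
	 * the true ring distance from 14 to 2 modulo 16. */
	len = (uint8_t)((chip_tx_tail << shift) - (tail << shift));
	printf("%u objects to process (ring of %u)\n", len >> shift, obj_num);
	return 0;
}
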
@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
u8 tef_tail, len, l;
int err, i;

err = mcp251xfd_tef_ring_update(priv);
err = mcp251xfd_get_tef_len(priv, &len);
if (err)
return err;

tef_tail = mcp251xfd_get_tef_tail(priv);
len = mcp251xfd_get_tef_len(priv);
l = mcp251xfd_get_tef_linear_len(priv);
l = mcp251xfd_get_tef_linear_len(priv, len);
err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
if (err)
return err;

@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
unsigned int frame_len = 0;

err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
/* -EAGAIN means the Sequence Number in the TEF
* doesn't match our tef_tail. This can happen if we
* read the TEF objects too early. Leave loop let the
* interrupt handler call us again.
/* -EBADMSG means we're affected by mcp2518fd erratum
* DS80000789E 6., i.e. the Sequence Number in the TEF
* doesn't match our tef_tail. Don't process any
* further and mark processed frames as good.
*/
if (err == -EAGAIN)
if (err == -EBADMSG)
goto out_netif_wake_queue;
if (err)
return err;

@ -223,6 +226,8 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
int offset;

ring->head += len;

/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
*

@ -524,6 +524,7 @@ struct mcp251xfd_tef_ring {

/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
/* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */

union mcp251xfd_write_reg_buf irq_enable_buf;
struct spi_transfer irq_enable_xfer;

@ -542,6 +543,7 @@ struct mcp251xfd_tx_ring {
u8 nr;
u8 fifo_nr;
u8 obj_num;
u8 obj_num_shift_to_u8;
u8 obj_size;

struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];

@ -861,17 +863,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
return priv->tef->tail & (priv->tx->obj_num - 1);
}

static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len)
{
return priv->tef->head - priv->tef->tail;
}

static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
{
u8 len;

len = mcp251xfd_get_tef_len(priv);

return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
}

@ -678,8 +678,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
of_remove_property(child, prop);

phydev = of_phy_find_device(child);
if (phydev)
if (phydev) {
phy_device_remove(phydev);
phy_device_free(phydev);
}
}

err = mdiobus_register(priv->slave_mii_bus);

@ -42,19 +42,15 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;

if (dev->phydev) {
if (dev->phydev)
phy_ethtool_get_wol(dev->phydev, wol);
if (wol->supported)
return;
}

if (!device_can_wakeup(kdev)) {
wol->supported = 0;
wol->wolopts = 0;
/* MAC is not wake-up capable, return what the PHY does */
if (!device_can_wakeup(kdev))
return;
}

wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
/* Overlay MAC capabilities with that of the PHY queried before */
wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
memset(wol->sopass, 0, sizeof(wol->sopass));

@ -775,6 +775,9 @@ void fec_ptp_stop(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);

if (fep->pps_enable)
fec_ptp_enable_pps(fep, 0);

cancel_delayed_work_sync(&fep->time_keep);
hrtimer_cancel(&fep->perout_timer);
if (fep->ptp_clock)

@ -557,6 +557,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;

synchronize_irq(pf->oicr_irq.virq);

ice_unplug_aux_dev(pf);

/* Notify VFs of impending reset */

@ -2374,6 +2374,9 @@ mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;

if (unlikely(!cstrides))
return;

wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);

@ -21,6 +21,7 @@
#define RGMII_IO_MACRO_CONFIG2 0x1C
#define RGMII_IO_MACRO_DEBUG1 0x20
#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4

/* RGMII_IO_MACRO_CONFIG fields */
#define RGMII_CONFIG_FUNC_CLK_EN BIT(30)

@ -79,6 +80,9 @@
#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)

/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */
#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3)

#define SGMII_10M_RX_CLK_DVDR 0x31

struct ethqos_emac_por {

@ -95,6 +99,7 @@ struct ethqos_emac_driver_data {
bool has_integrated_pcs;
u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
bool needs_sgmii_loopback;
};

struct qcom_ethqos {

@ -113,6 +118,7 @@ struct qcom_ethqos {
unsigned int num_por;
bool rgmii_config_loopback_en;
bool has_emac_ge_3;
bool needs_sgmii_loopback;
};

static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)

@ -187,8 +193,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
}

static void
qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
{
if (!ethqos->needs_sgmii_loopback ||
ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX)
return;

rgmii_updatel(ethqos,
SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN,
enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0,
EMAC_WRAPPER_SGMII_PHY_CNTRL1);
}

static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
{
qcom_ethqos_set_sgmii_loopback(ethqos, true);
rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}

@ -273,6 +293,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
.has_integrated_pcs = true,
.needs_sgmii_loopback = true,
.dma_addr_width = 36,
.dwmac4_addrs = {
.dma_chan = 0x00008100,

@ -646,6 +667,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo
{
struct qcom_ethqos *ethqos = priv;

qcom_ethqos_set_sgmii_loopback(ethqos, false);
ethqos->speed = speed;
ethqos_update_link_clk(ethqos, speed);
ethqos_configure(ethqos);

@ -781,6 +803,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->num_por = data->num_por;
ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
ethqos->has_emac_ge_3 = data->has_emac_ge_3;
ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback;

ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
if (IS_ERR(ethqos->link_clk))

@ -201,6 +201,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
break;
default:
/* not ip - do not know what to do */
kfree_skb(skbn);
goto skip;
}

@ -181,6 +181,8 @@ enum ath12k_dev_flags {
ATH12K_FLAG_REGISTERED,
ATH12K_FLAG_QMI_FAIL,
ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
};

enum ath12k_monitor_flags {

@ -2759,6 +2759,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
crypto_free_shash(tfm);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
return -ENOENT;
}

@ -10,17 +10,17 @@
#include "core.h"

struct ath12k_hif_ops {
u32 (*read32)(struct ath12k_base *sc, u32 address);
void (*write32)(struct ath12k_base *sc, u32 address, u32 data);
void (*irq_enable)(struct ath12k_base *sc);
void (*irq_disable)(struct ath12k_base *sc);
int (*start)(struct ath12k_base *sc);
void (*stop)(struct ath12k_base *sc);
int (*power_up)(struct ath12k_base *sc);
void (*power_down)(struct ath12k_base *sc);
u32 (*read32)(struct ath12k_base *ab, u32 address);
void (*write32)(struct ath12k_base *ab, u32 address, u32 data);
void (*irq_enable)(struct ath12k_base *ab);
void (*irq_disable)(struct ath12k_base *ab);
int (*start)(struct ath12k_base *ab);
void (*stop)(struct ath12k_base *ab);
int (*power_up)(struct ath12k_base *ab);
void (*power_down)(struct ath12k_base *ab);
int (*suspend)(struct ath12k_base *ab);
int (*resume)(struct ath12k_base *ab);
int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id,
int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,

@ -373,6 +373,8 @@ static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
{
int i;

clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;

@ -406,6 +408,10 @@ static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
{
struct ath12k_ce_pipe *ce_pipe = arg;
struct ath12k_base *ab = ce_pipe->ab;

if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;

/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;

@ -424,12 +430,15 @@ static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc)
static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
int i;

if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
return;

for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

ath12k_pci_ext_grp_disable(irq_grp);

@ -483,6 +492,10 @@ static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
{
struct ath12k_ext_irq_grp *irq_grp = arg;
struct ath12k_base *ab = irq_grp->ab;

if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;

ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);

@ -626,6 +639,8 @@ static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
{
int i;

set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;

@ -956,6 +971,8 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
int i;

set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

@ -826,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct bio_vec bv = rq_integrity_vec(req);

iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
rq_dma_dir(req), 0);
iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);

@ -969,7 +969,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
rq_integrity_vec(req).bv_len, rq_dma_dir(req));
}

if (blk_rq_nr_phys_segments(req))

@ -173,8 +173,8 @@ static int doscan(void *data)
*/
static void ifs_test_core(int cpu, struct device *dev)
{
union ifs_status status = {};
union ifs_scan activate;
union ifs_status status;
unsigned long timeout;
struct ifs_data *ifsd;
int to_start, to_stop;

@ -178,18 +178,18 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv)
u8 reg_val;
int ret;

if (cv <= CV_4100MV) {
reg_val = CHRG_CCCV_CV_4100MV;
cv = CV_4100MV;
} else if (cv <= CV_4150MV) {
reg_val = CHRG_CCCV_CV_4150MV;
cv = CV_4150MV;
} else if (cv <= CV_4200MV) {
reg_val = CHRG_CCCV_CV_4200MV;
cv = CV_4200MV;
} else {
if (cv >= CV_4350MV) {
reg_val = CHRG_CCCV_CV_4350MV;
cv = CV_4350MV;
} else if (cv >= CV_4200MV) {
reg_val = CHRG_CCCV_CV_4200MV;
cv = CV_4200MV;
} else if (cv >= CV_4150MV) {
reg_val = CHRG_CCCV_CV_4150MV;
cv = CV_4150MV;
} else {
reg_val = CHRG_CCCV_CV_4100MV;
cv = CV_4100MV;
}

reg_val = reg_val << CHRG_CCCV_CV_BIT_POS;

@ -337,8 +337,8 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
}
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
scaled_val = min(val->intval, info->max_cv);
scaled_val = DIV_ROUND_CLOSEST(scaled_val, 1000);
scaled_val = DIV_ROUND_CLOSEST(val->intval, 1000);
scaled_val = min(scaled_val, info->max_cv);
ret = axp288_charger_set_cv(info, scaled_val);
if (ret < 0) {
dev_warn(&info->pdev->dev, "set charge voltage failed\n");

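The unit-ordering bug fixed in the second hunk above, in isolation: info->max_cv is in millivolts while val->intval arrives in microvolts, so clamping before scaling compares values across units. The numbers below are illustrative.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int max_cv = 4350;        /* mV, hardware limit */
	int intval = 4200000;     /* µV, as passed in from the API */

	/* Old order: clamp in mixed units, then scale; 4200000 µV "clamps"
	 * to 4350 and scales down to a nonsensical 4 mV. */
	int buggy = DIV_ROUND_CLOSEST(MIN(intval, max_cv), 1000);

	/* Fixed order: scale to mV first, then clamp against max_cv. */
	int fixed = MIN(DIV_ROUND_CLOSEST(intval, 1000), max_cv);

	printf("buggy=%d mV, fixed=%d mV\n", buggy, fixed);
	return 0;
}
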
@ -486,7 +486,7 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
int ret;

if (!battmgr->service_up)
return -ENODEV;
return -EAGAIN;

if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);

@ -683,7 +683,7 @@ static int qcom_battmgr_ac_get_property(struct power_supply *psy,
int ret;

if (!battmgr->service_up)
return -ENODEV;
return -EAGAIN;

ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
if (ret)

@ -748,7 +748,7 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy,
int ret;

if (!battmgr->service_up)
return -ENODEV;
return -EAGAIN;

if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);

@ -867,7 +867,7 @@ static int qcom_battmgr_wls_get_property(struct power_supply *psy,
int ret;

if (!battmgr->service_up)
return -ENODEV;
return -EAGAIN;

if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);

@ -320,8 +320,14 @@ static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di)
&esize);
if (rc) {
/* Cancel running request if interrupted */
if (rc == -ERESTARTSYS)
sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL);
if (rc == -ERESTARTSYS) {
if (sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL)) {
pr_warn("Could not stop Store Data request - leaking at least %zu bytes\n",
(size_t)dsize * PAGE_SIZE);
data = NULL;
asce = 0;
}
}
vfree(data);
goto out;
}

@ -3447,6 +3447,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
scmd->sc_data_direction);
priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
} else {
/*
* Some firmware versions byte-swap the REPORT ZONES command
* reply from ATA-ZAC devices by directly accessing in the host
* buffer. This does not respect the default command DMA
* direction and causes IOMMU page faults on some architectures
* with an IOMMU enforcing write mappings (e.g. AMD hosts).
* Avoid such issue by making the REPORT ZONES buffer mapping
* bi-directional.
*/
if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
scmd->sc_data_direction = DMA_BIDIRECTIONAL;
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
}

@ -2671,6 +2671,22 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}

static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd)
{
/*
* Some firmware versions byte-swap the REPORT ZONES command reply from
* ATA-ZAC devices by directly accessing in the host buffer. This does
* not respect the default command DMA direction and causes IOMMU page
* faults on some architectures with an IOMMU enforcing write mappings
* (e.g. AMD hosts). Avoid such issue by making the report zones buffer
* mapping bi-directional.
*/
if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES)
cmd->sc_data_direction = DMA_BIDIRECTIONAL;

return scsi_dma_map(cmd);
}

/**
* _base_build_sg_scmd - main sg creation routine
* pcie_device is unused here!

@ -2717,7 +2733,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;

sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
sges_left = _base_scsi_dma_map(scmd);
if (sges_left < 0)
return -ENOMEM;

@ -2861,7 +2877,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
}

sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
sges_left = _base_scsi_dma_map(scmd);
if (sges_left < 0)
return -ENOMEM;

@ -296,7 +296,7 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
{
struct lpspi_config config = fsl_lpspi->config;
unsigned int perclk_rate, scldiv;
unsigned int perclk_rate, scldiv, div;
u8 prescale;

perclk_rate = clk_get_rate(fsl_lpspi->clk_per);

@ -313,8 +313,10 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
return -EINVAL;
}

div = DIV_ROUND_UP(perclk_rate, config.speed_hz);

for (prescale = 0; prescale < 8; prescale++) {
scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2;
scldiv = div / (1 << prescale) - 2;
if (scldiv < 256) {
fsl_lpspi->config.prescale = prescale;
break;

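A worked instance of the prescaler search above. Computing the divider once with DIV_ROUND_UP rounds toward a bus clock that never exceeds the requested rate, instead of repeating a truncating division at every prescale step, which is what the hunk changes. The 24 MHz per-clock and 1 MHz target are assumed example values.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int perclk_rate = 24000000, speed_hz = 1000000;
	unsigned int div = DIV_ROUND_UP(perclk_rate, speed_hz);
	unsigned int prescale, scldiv;

	for (prescale = 0; prescale < 8; prescale++) {
		scldiv = div / (1 << prescale) - 2;
		if (scldiv < 256) {
			/* For 24 MHz / 1 MHz this picks prescale=0, scldiv=22. */
			printf("prescale=%u scldiv=%u\n", prescale, scldiv);
			return 0;
		}
	}
	printf("rate unreachable\n");
	return 1;
}
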
@ -704,6 +704,7 @@ static const struct file_operations spidev_fops = {
static struct class *spidev_class;

static const struct spi_device_id spidev_spi_ids[] = {
{ .name = "bh2228fv" },
{ .name = "dh2228fv" },
{ .name = "ltc2488" },
{ .name = "sx1301" },

@ -876,6 +876,14 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
new_flags = (__force upf_t)new_info->flags;
old_custom_divisor = uport->custom_divisor;

if (!(uport->flags & UPF_FIXED_PORT)) {
unsigned int uartclk = new_info->baud_base * 16;
/* check needs to be done here before other settings made */
if (uartclk == 0) {
retval = -EINVAL;
goto exit;
}
}
if (!capable(CAP_SYS_ADMIN)) {
retval = -EPERM;
if (change_irq || change_port ||

@ -3971,11 +3971,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
min_sleep_time_us =
MIN_DELAY_BEFORE_DME_CMDS_US - delta;
else
return; /* no more delay required */
min_sleep_time_us = 0; /* no more delay required */
}

/* allow sleep for extra 50us if needed */
usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
if (min_sleep_time_us > 0) {
/* allow sleep for extra 50us if needed */
usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}

/* update the last_dme_cmd_tstamp */
hba->last_dme_cmd_tstamp = ktime_get();
}

/**

@ -10157,9 +10162,6 @@ int ufshcd_system_restore(struct device *dev)
*/
ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);

/* Resuming from hibernate, assume that link was OFF */
ufshcd_set_link_off(hba);

return 0;

}

@ -642,12 +642,21 @@ static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data)
if (format)
return; // invalid
blk = (*data >> 8) & 0xff;
if (blk >= ep->num_blks)
return;
if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
reply_ump_stream_fb_info(ep, blk);
if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
reply_ump_stream_fb_name(ep, blk);
if (blk == 0xff) {
/* inquiry for all blocks */
for (blk = 0; blk < ep->num_blks; blk++) {
if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
reply_ump_stream_fb_info(ep, blk);
if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
reply_ump_stream_fb_name(ep, blk);
}
} else if (blk < ep->num_blks) {
/* only the specified block */
if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
reply_ump_stream_fb_info(ep, blk);
if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
reply_ump_stream_fb_name(ep, blk);
}
return;
}
}

@ -592,16 +592,25 @@ int u_audio_start_capture(struct g_audio *audio_dev)
struct usb_ep *ep, *ep_fback;
struct uac_rtd_params *prm;
struct uac_params *params = &audio_dev->params;
int req_len, i;
int req_len, i, ret;

prm = &uac->c_prm;
dev_dbg(dev, "start capture with rate %d\n", prm->srate);
ep = audio_dev->out_ep;
config_ep_by_speed(gadget, &audio_dev->func, ep);
ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
if (ret < 0) {
dev_err(dev, "config_ep_by_speed for out_ep failed (%d)\n", ret);
return ret;
}

req_len = ep->maxpacket;

prm->ep_enabled = true;
usb_ep_enable(ep);
ret = usb_ep_enable(ep);
if (ret < 0) {
dev_err(dev, "usb_ep_enable failed for out_ep (%d)\n", ret);
return ret;
}

for (i = 0; i < params->req_number; i++) {
if (!prm->reqs[i]) {

@ -629,9 +638,18 @@ int u_audio_start_capture(struct g_audio *audio_dev)
return 0;

/* Setup feedback endpoint */
config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
ret = config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
if (ret < 0) {
dev_err(dev, "config_ep_by_speed in_ep_fback failed (%d)\n", ret);
return ret; // TODO: Clean up out_ep
}

prm->fb_ep_enabled = true;
usb_ep_enable(ep_fback);
ret = usb_ep_enable(ep_fback);
if (ret < 0) {
dev_err(dev, "usb_ep_enable failed for in_ep_fback (%d)\n", ret);
return ret; // TODO: Clean up out_ep
}
req_len = ep_fback->maxpacket;

req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC);

@ -687,13 +705,17 @@ int u_audio_start_playback(struct g_audio *audio_dev)
struct uac_params *params = &audio_dev->params;
unsigned int factor;
const struct usb_endpoint_descriptor *ep_desc;
int req_len, i;
int req_len, i, ret;
unsigned int p_pktsize;

prm = &uac->p_prm;
dev_dbg(dev, "start playback with rate %d\n", prm->srate);
ep = audio_dev->in_ep;
config_ep_by_speed(gadget, &audio_dev->func, ep);
ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
if (ret < 0) {
dev_err(dev, "config_ep_by_speed for in_ep failed (%d)\n", ret);
return ret;
}

ep_desc = ep->desc;
/*

@ -720,7 +742,11 @@ int u_audio_start_playback(struct g_audio *audio_dev)
uac->p_residue_mil = 0;

prm->ep_enabled = true;
usb_ep_enable(ep);
ret = usb_ep_enable(ep);
if (ret < 0) {
dev_err(dev, "usb_ep_enable failed for in_ep (%d)\n", ret);
return ret;
}

for (i = 0; i < params->req_number; i++) {
if (!prm->reqs[i]) {

@ -1441,6 +1441,7 @@ void gserial_suspend(struct gserial *gser)
spin_lock(&port->port_lock);
spin_unlock(&serial_port_lock);
port->suspended = true;
port->start_delayed = true;
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_suspend);

@ -118,12 +118,10 @@ int usb_ep_enable(struct usb_ep *ep)
goto out;

/* UDC drivers can't handle endpoints with maxpacket size 0 */
if (usb_endpoint_maxp(ep->desc) == 0) {
/*
* We should log an error message here, but we can't call
* dev_err() because there's no way to find the gadget
* given only ep.
*/
if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) {
WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name,
(!ep->desc) ? "NULL descriptor" : "maxpacket 0");

ret = -EINVAL;
goto out;
}

@ -76,6 +76,11 @@ static void usb_debug_process_read_urb(struct urb *urb)
usb_serial_generic_process_read_urb(urb);
}

static void usb_debug_init_termios(struct tty_struct *tty)
{
tty->termios.c_lflag &= ~(ECHO | ECHONL);
}

static struct usb_serial_driver debug_device = {
.driver = {
.owner = THIS_MODULE,

@ -85,6 +90,7 @@ static struct usb_serial_driver debug_device = {
.num_ports = 1,
.bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
.break_ctl = usb_debug_break_ctl,
.init_termios = usb_debug_init_termios,
.process_read_urb = usb_debug_process_read_urb,
};

@ -96,6 +102,7 @@ static struct usb_serial_driver dbc_device = {
.id_table = dbc_id_table,
.num_ports = 1,
.break_ctl = usb_debug_break_ctl,
.init_termios = usb_debug_init_termios,
.process_read_urb = usb_debug_process_read_urb,
};

@ -745,6 +745,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
*
*/
if (usb_pipedevice(urb->pipe) == 0) {
struct usb_device *old;
__u8 type = usb_pipetype(urb->pipe);
struct usb_ctrlrequest *ctrlreq =
(struct usb_ctrlrequest *) urb->setup_packet;

@ -755,14 +756,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
goto no_need_xmit;
}

old = vdev->udev;
switch (ctrlreq->bRequest) {
case USB_REQ_SET_ADDRESS:
/* set_address may come when a device is reset */
dev_info(dev, "SetAddress Request (%d) to port %d\n",
ctrlreq->wValue, vdev->rhport);

usb_put_dev(vdev->udev);
vdev->udev = usb_get_dev(urb->dev);
usb_put_dev(old);

spin_lock(&vdev->ud.lock);
vdev->ud.status = VDEV_ST_USED;

@ -781,8 +783,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
usbip_dbg_vhci_hc(
"Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");

usb_put_dev(vdev->udev);
vdev->udev = usb_get_dev(urb->dev);
usb_put_dev(old);
goto out;

default:

@ -1067,6 +1069,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
static void vhci_device_reset(struct usbip_device *ud)
{
struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
struct usb_device *old = vdev->udev;
unsigned long flags;

spin_lock_irqsave(&ud->lock, flags);

@ -1074,8 +1077,8 @@ static void vhci_device_reset(struct usbip_device *ud)
vdev->speed = 0;
vdev->devid = 0;

usb_put_dev(vdev->udev);
vdev->udev = NULL;
usb_put_dev(old);

if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);

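The reference-ordering pattern behind the vhci hunks above, reduced to a generic runnable sketch: remember the old pointer, publish the new reference, and only then drop the old one, so the published pointer never transiently refers to a freed object. The tiny refcount type stands in for the usb_get_dev()/usb_put_dev() pair and is purely illustrative.

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static struct obj *get(struct obj *o) { if (o) o->refs++; return o; }
static void put(struct obj *o)
{
	if (o && --o->refs == 0) {
		printf("freeing %p\n", (void *)o);
		free(o);
	}
}

int main(void)
{
	struct obj *current_dev = get(calloc(1, sizeof(*current_dev)));
	struct obj *new_dev = get(calloc(1, sizeof(*new_dev)));
	struct obj *old;

	/* Like the fixed vhci code: save old, take the new reference,
	 * then release old. Putting first and getting second would be
	 * unsafe if both pointers referred to the same object. */
	old = current_dev;
	current_dev = get(new_dev);
	put(old);

	put(new_dev);
	put(current_dev);
	return 0;
}
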
@ -1378,13 +1378,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)

notify = ops->get_vq_notification(vdpa, index);

vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
PFN_DOWN(notify.addr), PAGE_SIZE,
vma->vm_page_prot))
return VM_FAULT_SIGBUS;

return VM_FAULT_NOPAGE;
return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {

@ -841,7 +841,7 @@ out:
#ifdef CONFIG_XEN_PRIVCMD_IRQFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_MUTEX(irqfds_lock);
static DEFINE_SPINLOCK(irqfds_lock);
static LIST_HEAD(irqfds_list);

struct privcmd_kernel_irqfd {

@ -905,9 +905,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
irqfd_inject(kirqfd);

if (flags & EPOLLHUP) {
mutex_lock(&irqfds_lock);
unsigned long flags;

spin_lock_irqsave(&irqfds_lock, flags);
irqfd_deactivate(kirqfd);
mutex_unlock(&irqfds_lock);
spin_unlock_irqrestore(&irqfds_lock, flags);
}

return 0;

@ -925,6 +927,7 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
struct privcmd_kernel_irqfd *kirqfd, *tmp;
unsigned long flags;
__poll_t events;
struct fd f;
void *dm_op;

@ -964,18 +967,18 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);

mutex_lock(&irqfds_lock);
spin_lock_irqsave(&irqfds_lock, flags);

list_for_each_entry(tmp, &irqfds_list, list) {
if (kirqfd->eventfd == tmp->eventfd) {
ret = -EBUSY;
mutex_unlock(&irqfds_lock);
spin_unlock_irqrestore(&irqfds_lock, flags);
goto error_eventfd;
}
}

list_add_tail(&kirqfd->list, &irqfds_list);
mutex_unlock(&irqfds_lock);
spin_unlock_irqrestore(&irqfds_lock, flags);

/*
* Check if there was an event already pending on the eventfd before we

@ -1007,12 +1010,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
struct privcmd_kernel_irqfd *kirqfd;
struct eventfd_ctx *eventfd;
unsigned long flags;

eventfd = eventfd_ctx_fdget(irqfd->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);

mutex_lock(&irqfds_lock);
spin_lock_irqsave(&irqfds_lock, flags);

list_for_each_entry(kirqfd, &irqfds_list, list) {
if (kirqfd->eventfd == eventfd) {

@ -1021,7 +1025,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
}
}

mutex_unlock(&irqfds_lock);
spin_unlock_irqrestore(&irqfds_lock, flags);

eventfd_ctx_put(eventfd);

@ -1069,13 +1073,14 @@ static int privcmd_irqfd_init(void)
static void privcmd_irqfd_exit(void)
{
struct privcmd_kernel_irqfd *kirqfd, *tmp;
unsigned long flags;

mutex_lock(&irqfds_lock);
spin_lock_irqsave(&irqfds_lock, flags);

list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
irqfd_deactivate(kirqfd);

mutex_unlock(&irqfds_lock);
spin_unlock_irqrestore(&irqfds_lock, flags);

destroy_workqueue(irqfd_cleanup_wq);
}

@ -445,6 +445,7 @@ struct btrfs_file_private {
void *filldir_buf;
u64 last_index;
struct extent_state *llseek_cached_state;
bool fsync_skip_inode_lock;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)

@ -2172,10 +2172,8 @@ void extent_write_locked_range(struct inode *inode, struct page *locked_page,

page = find_get_page(mapping, cur >> PAGE_SHIFT);
ASSERT(PageLocked(page));
if (pages_dirty && page != locked_page) {
if (pages_dirty && page != locked_page)
ASSERT(PageDirty(page));
clear_page_dirty_for_io(page);
}

ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
i_size, &nr);

@ -1535,21 +1535,37 @@ relock:
* So here we disable page faults in the iov_iter and then retry if we
* got -EFAULT, faulting in the pages before the retry.
*/
again:
from->nofault = true;
dio = btrfs_dio_write(iocb, from, written);
from->nofault = false;

/*
* iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
* iocb, and that needs to lock the inode. So unlock it before calling
* iomap_dio_complete() to avoid a deadlock.
*/
btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);

if (IS_ERR_OR_NULL(dio))
if (IS_ERR_OR_NULL(dio)) {
err = PTR_ERR_OR_ZERO(dio);
else
} else {
struct btrfs_file_private stack_private = { 0 };
struct btrfs_file_private *private;
const bool have_private = (file->private_data != NULL);

if (!have_private)
file->private_data = &stack_private;

/*
|
||||
* If we have a synchoronous write, we must make sure the fsync
|
||||
* triggered by the iomap_dio_complete() call below doesn't
|
||||
* deadlock on the inode lock - we are already holding it and we
|
||||
* can't call it after unlocking because we may need to complete
|
||||
* partial writes due to the input buffer (or parts of it) not
|
||||
* being already faulted in.
|
||||
*/
|
||||
private = file->private_data;
|
||||
private->fsync_skip_inode_lock = true;
|
||||
err = iomap_dio_complete(dio);
|
||||
private->fsync_skip_inode_lock = false;
|
||||
|
||||
if (!have_private)
|
||||
file->private_data = NULL;
|
||||
}
|
||||
|
||||
/* No increment (+=) because iomap returns a cumulative value. */
|
||||
if (err > 0)
|
||||
|
@ -1576,10 +1592,12 @@ relock:
|
|||
} else {
|
||||
fault_in_iov_iter_readable(from, left);
|
||||
prev_left = left;
|
||||
goto relock;
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
|
||||
btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
|
||||
|
||||
/*
|
||||
* If 'err' is -ENOTBLK or we have not written all data, then it means
|
||||
* we must fallback to buffered IO.
|
||||
|
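The else-branch above uses a "borrow or stack-allocate" pattern: if the file never allocated a private struct, a zeroed stack instance is attached just long enough for the nested fsync (reached via iomap_dio_complete()) to see the flag, and detached before it can outlive the frame. A generic sketch of the pattern, with illustrative names:

#include <linux/fs.h>

struct ctx { bool skip_lock; };

static int complete_with_flag(struct file *file, int (*complete)(struct file *))
{
	struct ctx stack_ctx = { 0 };
	struct ctx *ctx;
	const bool have_ctx = (file->private_data != NULL);
	int err;

	if (!have_ctx)
		file->private_data = &stack_ctx;

	ctx = file->private_data;
	ctx->skip_lock = true;		/* tell the nested callee not to relock */
	err = complete(file);		/* may call fsync internally */
	ctx->skip_lock = false;

	if (!have_ctx)
		file->private_data = NULL;	/* never leak a stack pointer */

	return err;
}

The reset to NULL is essential: leaving a pointer to a dead stack frame in file->private_data would be a use-after-return bug on the next access.
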
@@ -1778,6 +1796,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
  */
 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
+	struct btrfs_file_private *private = file->private_data;
 	struct dentry *dentry = file_dentry(file);
 	struct inode *inode = d_inode(dentry);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

@@ -1787,6 +1806,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	int ret = 0, err;
 	u64 len;
 	bool full_sync;
+	const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
 
 	trace_btrfs_sync_file(file, datasync);
 

@@ -1814,7 +1834,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	if (ret)
 		goto out;
 
-	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+	if (skip_ilock)
+		down_write(&BTRFS_I(inode)->i_mmap_lock);
+	else
+		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 
 	atomic_inc(&root->log_batch);
 

@@ -1838,7 +1861,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 */
 	ret = start_ordered_ops(inode, start, end);
 	if (ret) {
-		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+		if (skip_ilock)
+			up_write(&BTRFS_I(inode)->i_mmap_lock);
+		else
+			btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 		goto out;
 	}
 

@@ -1941,7 +1967,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * file again, but that will end up using the synchronization
 	 * inside btrfs_sync_log to keep things safe.
 	 */
-	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+	if (skip_ilock)
+		up_write(&BTRFS_I(inode)->i_mmap_lock);
+	else
+		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 
 	if (ret == BTRFS_NO_LOG_SYNC) {
 		ret = btrfs_end_transaction(trans);

@@ -2009,7 +2038,10 @@ out:
 
 out_release_extents:
 	btrfs_release_log_ctx_extents(&ctx);
-	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+	if (skip_ilock)
+		up_write(&BTRFS_I(inode)->i_mmap_lock);
+	else
+		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 	goto out;
 }
 

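All four btrfs_sync_file() sites repeat the same conditional, so the shape of the fix is easy to misread in diff form: when the direct IO writer already holds the inode lock (skip_ilock), fsync takes only the mmap write lock, which it still needs in order to serialize against page faults; otherwise it takes both via BTRFS_ILOCK_MMAP as before. Hypothetical helpers, assuming the in-tree btrfs headers, that factor out the pattern btrfs open-codes:

static void sync_lock(struct btrfs_inode *inode, bool skip_ilock)
{
	if (skip_ilock)
		down_write(&inode->i_mmap_lock);	/* inode lock already held by caller */
	else
		btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
}

static void sync_unlock(struct btrfs_inode *inode, bool skip_ilock)
{
	if (skip_ilock)
		up_write(&inode->i_mmap_lock);
	else
		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
}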
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue