Merge linux 6.6.29

commit 1f2d7cadb2

@@ -6858,6 +6858,9 @@
 pause after every control message);
 o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
 delay after resetting its port);
+p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT
+(Reduce timeout of the SET_ADDRESS
+request from 5000 ms to 500 ms);
 Example: quirks=0781:5580:bk,0a5c:5834:gij

 usbhid.mousepoll=
@@ -8142,7 +8142,7 @@ M: Geoffrey D. Bennett <g@b4.vu>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
-F: sound/usb/mixer_scarlett_gen2.c
+F: sound/usb/mixer_scarlett2.c

 FORCEDETH GIGABIT ETHERNET DRIVER
 M: Rain River <rain.1986.08.12@gmail.com>
Makefile

@@ -8,7 +8,7 @@ else
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 28
+SUBLEVEL = 29
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth

@@ -275,9 +275,19 @@ static struct platform_device pandora_backlight = {
 .id = -1,
 };

+static struct gpiod_lookup_table pandora_soc_audio_gpios = {
+.dev_id = "soc-audio",
+.table = {
+GPIO_LOOKUP("gpio-112-127", 6, "dac", GPIO_ACTIVE_HIGH),
+GPIO_LOOKUP("gpio-0-15", 14, "amp", GPIO_ACTIVE_HIGH),
+{ }
+},
+};
+
 static void __init omap3_pandora_legacy_init(void)
 {
 platform_device_register(&pandora_backlight);
+gpiod_add_lookup_table(&pandora_soc_audio_gpios);
 }
 #endif /* CONFIG_ARCH_OMAP3 */

@@ -152,12 +152,18 @@ static inline unsigned long get_trans_granule(void)
 #define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)

 /*
-* Generate 'num' values from -1 to 30 with -1 rejected by the
-* __flush_tlb_range() loop below.
+* Generate 'num' values from -1 to 31 with -1 rejected by the
+* __flush_tlb_range() loop below. Its return value is only
+* significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
+* 'pages' is more than that, you must iterate over the overall
+* range.
 */
-#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
-#define __TLBI_RANGE_NUM(pages, scale) \
-((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
+#define __TLBI_RANGE_NUM(pages, scale) \
+({ \
+int __pages = min((pages), \
+__TLBI_RANGE_PAGES(31, (scale))); \
+(__pages >> (5 * (scale) + 1)) - 1; \
+})

 /*
 * TLB Invalidation
@@ -351,29 +357,25 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
-* 1. If 'pages' is odd, flush the first page through non-range
-* operations;
+* 1. The minimum range granularity is decided by 'scale', so multiple range
+* TLBI operations may be required. Start from scale = 3, flush the largest
+* possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
+* requested range, then decrement scale and continue until one or zero pages
+* are left.
 *
-* 2. For remaining pages: the minimum range granularity is decided
-* by 'scale', so multiple range TLBI operations may be required.
-* Start from scale = 0, flush the corresponding number of pages
-* ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
-* until no pages left.
-*
-* Note that certain ranges can be represented by either num = 31 and
-* scale or num = 0 and scale + 1. The loop below favours the latter
-* since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+* 2. If there is 1 page remaining, flush it through non-range operations. Range
+* operations can only span an even number of pages.
 */
 #define __flush_tlb_range_op(op, start, pages, stride, \
 asid, tlb_level, tlbi_user) \
 do { \
 int num = 0; \
-int scale = 0; \
+int scale = 3; \
 unsigned long addr; \
 \
 while (pages > 0) { \
 if (!system_supports_tlb_range() || \
-pages % 2 == 1) { \
+pages == 1) { \
 addr = __TLBI_VADDR(start, asid); \
 __tlbi_level(op, addr, tlb_level); \
 if (tlbi_user) \

@@ -393,7 +395,7 @@ do { \
 start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
 pages -= __TLBI_RANGE_PAGES(num, scale); \
 } \
-scale++; \
+scale--; \
 } \
 } while (0)

@@ -569,6 +569,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 adr_l x1, __hyp_text_end
 adr_l x2, dcache_clean_poc
 blr x2
+
+mov_q x0, INIT_SCTLR_EL2_MMU_OFF
+pre_disable_mmu_workaround
+msr sctlr_el2, x0
+isb
 0:
 mov_q x0, HCR_HOST_NVHE_FLAGS
 msr hcr_el2, x0
@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
 pte_t *ptep;
 unsigned long addr = (unsigned long)page_address(page);

-if (!can_set_direct_map())
-return true;
-
 pgdp = pgd_offset_k(addr);
 if (pgd_none(READ_ONCE(*pgdp)))
 return false;
@@ -20,14 +20,6 @@
 #ifndef __ASSEMBLY__
 extern void _mcount(void);

-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
-if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
-addr += MCOUNT_INSN_SIZE;
-
-return addr;
-}
-
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
 unsigned long sp);

@@ -142,8 +134,10 @@ static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
 #ifdef CONFIG_FUNCTION_TRACER
 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
 void ftrace_free_init_tramp(void);
+unsigned long ftrace_call_adjust(unsigned long addr);
 #else
 static inline void ftrace_free_init_tramp(void) { }
+static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
 #endif
 #endif /* !__ASSEMBLY__ */

@@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;

 extern char __head_end[];
 extern char __srwx_boundary[];
+extern char __exittext_begin[], __exittext_end[];

 /* Patch sites */
 extern s32 patch__call_flush_branch_caches1;
@@ -27,10 +27,22 @@
 #include <asm/ftrace.h>
 #include <asm/syscall.h>
 #include <asm/inst.h>
+#include <asm/sections.h>

 #define NUM_FTRACE_TRAMPS 2
 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

+unsigned long ftrace_call_adjust(unsigned long addr)
+{
+if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
+return 0;
+
+if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
+addr += MCOUNT_INSN_SIZE;
+
+return addr;
+}
+
 static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
 {
 ppc_inst_t op;
@@ -37,6 +37,11 @@
 #define NUM_FTRACE_TRAMPS 8
 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

+unsigned long ftrace_call_adjust(unsigned long addr)
+{
+return addr;
+}
+
 static ppc_inst_t
 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
 {
@@ -281,7 +281,9 @@ SECTIONS
 * to deal with references from __bug_table
 */
 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+__exittext_begin = .;
 EXIT_TEXT
+__exittext_end = .;
 }

 . = ALIGN(PAGE_SIZE);
@@ -79,6 +79,9 @@ do { \
 #define __smp_mb__before_atomic() do { } while (0)
 #define __smp_mb__after_atomic() do { } while (0)

+/* Writing to CR3 provides a full memory barrier in switch_mm(). */
+#define smp_mb__after_switch_mm() do { } while (0)
+
 #include <asm-generic/barrier.h>

 #endif /* _ASM_X86_BARRIER_H */
@@ -828,6 +828,7 @@ struct kvm_vcpu_arch {
 int cpuid_nent;
 struct kvm_cpuid_entry2 *cpuid_entries;
 struct kvm_hypervisor_cpuid kvm_cpuid;
+bool is_amd_compatible;

 /*
 * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
@@ -1651,7 +1651,8 @@ static void __init bhi_select_mitigation(void)
 return;

 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
-if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
+if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
 spec_ctrl_disable_kernel_rrsba();
 if (rrsba_disabled)
 return;

@@ -2803,11 +2804,13 @@ static const char *spectre_bhi_state(void)
 {
 if (!boot_cpu_has_bug(X86_BUG_BHI))
 return "; BHI: Not affected";
 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
 return "; BHI: BHI_DIS_S";
 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
 return "; BHI: SW loop, KVM: SW loop";
-else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
+else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
+rrsba_disabled)
 return "; BHI: Retpoline";
 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
 return "; BHI: Vulnerable, KVM: SW loop";
@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
 { X86_FEATURE_F16C, X86_FEATURE_XMM2, },
 { X86_FEATURE_AES, X86_FEATURE_XMM2 },
 { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
+{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
 { X86_FEATURE_FMA, X86_FEATURE_AVX },
+{ X86_FEATURE_VAES, X86_FEATURE_AVX },
+{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
 { X86_FEATURE_AVX2, X86_FEATURE_AVX, },
 { X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
 { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },

@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
 { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
 { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
 { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
-{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
 { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
 { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
 { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
@@ -362,6 +362,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)

 kvm_update_pv_runtime(vcpu);

+vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

@@ -125,6 +125,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
 return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
 }

+static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
+{
+return vcpu->arch.is_amd_compatible;
+}
+
+static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
+{
+return !guest_cpuid_is_amd_compatible(vcpu);
+}
+
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
 struct kvm_cpuid_entry2 *best;
@@ -2772,7 +2772,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;

 r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
-if (r && lvt_type == APIC_LVTPC)
+if (r && lvt_type == APIC_LVTPC &&
+guest_cpuid_is_intel_compatible(apic->vcpu))
 kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
 return r;
 }
@@ -4788,7 +4788,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 context->cpu_role.base.level, is_efer_nx(context),
 guest_can_use(vcpu, X86_FEATURE_GBPAGES),
 is_cr4_pse(context),
-guest_cpuid_is_amd_or_hygon(vcpu));
+guest_cpuid_is_amd_compatible(vcpu));
 }

 static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
@@ -1506,6 +1506,16 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 }
 }

+static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
+{
+/*
+* All TDP MMU shadow pages share the same role as their root, aside
+* from level, so it is valid to key off any shadow page to determine if
+* write protection is needed for an entire tree.
+*/
+return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
+}
+
 /*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.

@@ -1516,7 +1526,8 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 gfn_t start, gfn_t end)
 {
-u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
+const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
+shadow_dirty_mask;
 struct tdp_iter iter;
 bool spte_set = false;

@@ -1530,7 +1541,7 @@ retry:
 if (!is_shadow_present_pte(iter.old_spte))
 continue;

-KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
 spte_ad_need_write_protect(iter.old_spte));

 if (!(iter.old_spte & dbit))

@@ -1578,8 +1589,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 gfn_t gfn, unsigned long mask, bool wrprot)
 {
-u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
+const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
 shadow_dirty_mask;
 struct tdp_iter iter;

 lockdep_assert_held_write(&kvm->mmu_lock);

@@ -1591,7 +1602,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 if (!mask)
 break;

-KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
 spte_ad_need_write_protect(iter.old_spte));

 if (iter.level > PG_LEVEL_4K ||
@@ -7916,8 +7916,28 @@ static u64 vmx_get_perf_capabilities(void)

 if (vmx_pebs_supported()) {
 perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
-if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
-perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+/*
+* Disallow adaptive PEBS as it is functionally broken, can be
+* used by the guest to read *host* LBRs, and can be used to
+* bypass userspace event filters. To correctly and safely
+* support adaptive PEBS, KVM needs to:
+*
+* 1. Account for the ADAPTIVE flag when (re)programming fixed
+* counters.
+*
+* 2. Gain support from perf (or take direct control of counter
+* programming) to support events without adaptive PEBS
+* enabled for the hardware counter.
+*
+* 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
+* adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
+*
+* 4. Document which PMU events are effectively exposed to the
+* guest via adaptive PEBS, and make adaptive PEBS mutually
+* exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
+*/
+perf_cap &= ~PERF_CAP_PEBS_BASELINE;
 }

 return perf_cap;
@@ -3322,7 +3322,7 @@ static bool is_mci_status_msr(u32 msr)
 static bool can_set_mci_status(struct kvm_vcpu *vcpu)
 {
 /* McStatusWrEn enabled? */
-if (guest_cpuid_is_amd_or_hygon(vcpu))
+if (guest_cpuid_is_amd_compatible(vcpu))
 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));

 return false;
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
 }
 attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
 buf[cnt++] = attr_ch;
-while (tmpx < vc->vc_cols - 1) {
+while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
 tmp_pos += 2;
 tmpx++;
 ch = get_char(vc, (u_short *)tmp_pos, &temp);
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
 size_t object_size = 0;

 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-if (offset > buffer->data_size || read_size < sizeof(*hdr))
+if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+!IS_ALIGNED(offset, sizeof(u32)))
 return 0;

 if (u) {
 if (copy_from_user(object, u + offset, read_size))
 return 0;
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)

 static void __cold _credit_init_bits(size_t bits)
 {
-static struct execute_work set_ready;
+static DECLARE_WORK(set_ready, crng_set_ready);
 unsigned int new, orig, add;
 unsigned long flags;

@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)

 if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
 crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
-if (static_key_initialized)
-execute_in_process_context(crng_set_ready, &set_ready);
+if (static_key_initialized && system_unbound_wq)
+queue_work(system_unbound_wq, &set_ready);
 atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
 wake_up_interruptible(&crng_init_wait);
 kill_fasync(&fasync, SIGIO, POLL_IN);

@@ -890,8 +890,8 @@ void __init random_init(void)

 /*
 * If we were initialized by the cpu or bootloader before jump labels
-* are initialized, then we should enable the static branch here, where
-* it's guaranteed that jump labels have been initialized.
+* or workqueues are initialized, then we should enable the static
+* branch here, where it's guaranteed that these have been initialized.
 */
 if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
 crng_set_ready(NULL);

@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);

+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
 static const struct hlist_head *all_lists[] = {
 &clk_root_list,
 &clk_orphan_list,

@@ -59,6 +63,7 @@ struct clk_core {
 struct clk_hw *hw;
 struct module *owner;
 struct device *dev;
+struct hlist_node rpm_node;
 struct device_node *of_node;
 struct clk_core *parent;
 struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
 pm_runtime_put_sync(core->dev);
 }

+/**
+* clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+*
+* Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+* that disabling unused clks avoids a deadlock where a device is runtime PM
+* resuming/suspending and the runtime PM callback is trying to grab the
+* prepare_lock for something like clk_prepare_enable() while
+* clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+* PM resume/suspend the device as well.
+*
+* Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+* success. Otherwise the lock is released on failure.
+*
+* Return: 0 on success, negative errno otherwise.
+*/
+static int clk_pm_runtime_get_all(void)
+{
+int ret;
+struct clk_core *core, *failed;
+
+/*
+* Grab the list lock to prevent any new clks from being registered
+* or unregistered until clk_pm_runtime_put_all().
+*/
+mutex_lock(&clk_rpm_list_lock);
+
+/*
+* Runtime PM "get" all the devices that are needed for the clks
+* currently registered. Do this without holding the prepare_lock, to
+* avoid the deadlock.
+*/
+hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ret = clk_pm_runtime_get(core);
+if (ret) {
+failed = core;
+pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+dev_name(failed->dev), failed->name);
+goto err;
+}
+}
+
+return 0;
+
+err:
+hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+if (core == failed)
+break;
+
+clk_pm_runtime_put(core);
+}
+mutex_unlock(&clk_rpm_list_lock);
+
+return ret;
+}
+
+/**
+* clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+*
+* Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+* the 'clk_rpm_list_lock'.
+*/
+static void clk_pm_runtime_put_all(void)
+{
+struct clk_core *core;
+
+hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+clk_pm_runtime_put(core);
+mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+struct device *dev = core->dev;
+
+if (dev && pm_runtime_enabled(dev)) {
+core->rpm_enabled = true;
+
+mutex_lock(&clk_rpm_list_lock);
+hlist_add_head(&core->rpm_node, &clk_rpm_list);
+mutex_unlock(&clk_rpm_list_lock);
+}
+}
+
 /*** locking ***/
 static void clk_prepare_lock(void)
 {
@@ -1362,9 +1450,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
 if (core->flags & CLK_IGNORE_UNUSED)
 return;

-if (clk_pm_runtime_get(core))
-return;
-
 if (clk_core_is_prepared(core)) {
 trace_clk_unprepare(core);
 if (core->ops->unprepare_unused)

@@ -1373,8 +1458,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
 core->ops->unprepare(core->hw);
 trace_clk_unprepare_complete(core);
 }
-
-clk_pm_runtime_put(core);
 }

 static void __init clk_disable_unused_subtree(struct clk_core *core)

@@ -1390,9 +1473,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 if (core->flags & CLK_OPS_PARENT_ENABLE)
 clk_core_prepare_enable(core->parent);

-if (clk_pm_runtime_get(core))
-goto unprepare_out;
-
 flags = clk_enable_lock();

 if (core->enable_count)

@@ -1417,8 +1497,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)

 unlock_out:
 clk_enable_unlock(flags);
-clk_pm_runtime_put(core);
-unprepare_out:
 if (core->flags & CLK_OPS_PARENT_ENABLE)
 clk_core_disable_unprepare(core->parent);
 }

@@ -1434,6 +1512,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 static int __init clk_disable_unused(void)
 {
 struct clk_core *core;
+int ret;

 if (clk_ignore_unused) {
 pr_warn("clk: Not disabling unused clocks\n");

@@ -1442,6 +1521,13 @@ static int __init clk_disable_unused(void)

 pr_info("clk: Disabling unused clocks\n");

+ret = clk_pm_runtime_get_all();
+if (ret)
+return ret;
+/*
+* Grab the prepare lock to keep the clk topology stable while iterating
+* over clks.
+*/
 clk_prepare_lock();

 hlist_for_each_entry(core, &clk_root_list, child_node)

@@ -1458,6 +1544,8 @@ static int __init clk_disable_unused(void)

 clk_prepare_unlock();

+clk_pm_runtime_put_all();
+
 return 0;
 }
 late_initcall_sync(clk_disable_unused);
@@ -3191,28 +3279,41 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
 int level)
 {
 int phase;
+struct clk *clk_user;
+int multi_node = 0;

-seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
+seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
 level * 3 + 1, "",
-30 - level * 3, c->name,
+35 - level * 3, c->name,
 c->enable_count, c->prepare_count, c->protect_count,
 clk_core_get_rate_recalc(c),
 clk_core_get_accuracy_recalc(c));

 phase = clk_core_get_phase(c);
 if (phase >= 0)
-seq_printf(s, "%5d", phase);
+seq_printf(s, "%-5d", phase);
 else
 seq_puts(s, "-----");

-seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
+seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));

 if (c->ops->is_enabled)
-seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
+seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
 else if (!c->ops->enable)
-seq_printf(s, " %9c\n", 'Y');
+seq_printf(s, " %5c ", 'Y');
 else
-seq_printf(s, " %9c\n", '?');
+seq_printf(s, " %5c ", '?');
+
+hlist_for_each_entry(clk_user, &c->clks, clks_node) {
+seq_printf(s, "%*s%-*s %-25s\n",
+level * 3 + 2 + 105 * multi_node, "",
+30,
+clk_user->dev_id ? clk_user->dev_id : "deviceless",
+clk_user->con_id ? clk_user->con_id : "no_connection_id");
+
+multi_node = 1;
+}
+
 }

 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -3220,9 +3321,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
 {
 struct clk_core *child;

-clk_pm_runtime_get(c);
 clk_summary_show_one(s, c, level);
-clk_pm_runtime_put(c);

 hlist_for_each_entry(child, &c->children, child_node)
 clk_summary_show_subtree(s, child, level + 1);

@@ -3232,10 +3331,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
 {
 struct clk_core *c;
 struct hlist_head **lists = s->private;
+int ret;

-seq_puts(s, " enable prepare protect duty hardware\n");
-seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
-seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
+seq_puts(s, " enable prepare protect duty hardware connection\n");
+seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
+seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+ret = clk_pm_runtime_get_all();
+if (ret)
+return ret;

 clk_prepare_lock();

@@ -3244,6 +3348,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
 clk_summary_show_subtree(s, c, 0);

 clk_prepare_unlock();
+clk_pm_runtime_put_all();

 return 0;
 }
@@ -3291,8 +3396,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
 struct clk_core *c;
 bool first_node = true;
 struct hlist_head **lists = s->private;
+int ret;
+
+ret = clk_pm_runtime_get_all();
+if (ret)
+return ret;

 seq_putc(s, '{');

 clk_prepare_lock();

 for (; *lists; lists++) {

@@ -3305,6 +3416,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
 }

 clk_prepare_unlock();
+clk_pm_runtime_put_all();

 seq_puts(s, "}\n");
 return 0;
@@ -3919,8 +4031,6 @@ static int __clk_core_init(struct clk_core *core)
 }

 clk_core_reparent_orphans_nolock();

-kref_init(&core->ref);
-
 out:
 clk_pm_runtime_put(core);
 unlock:
@@ -4149,6 +4259,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
 kfree(core->parents);
 }

+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+if (core->rpm_enabled) {
+mutex_lock(&clk_rpm_list_lock);
+hlist_del(&core->rpm_node);
+mutex_unlock(&clk_rpm_list_lock);
+}
+
+clk_core_free_parent_map(core);
+kfree_const(core->name);
+kfree(core);
+}
+
 static struct clk *
 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 {
@@ -4169,6 +4295,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 goto fail_out;
 }

+kref_init(&core->ref);
+
 core->name = kstrdup_const(init->name, GFP_KERNEL);
 if (!core->name) {
 ret = -ENOMEM;
@@ -4181,9 +4309,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 }
 core->ops = init->ops;

-if (dev && pm_runtime_enabled(dev))
-core->rpm_enabled = true;
 core->dev = dev;
+clk_pm_runtime_init(core);
 core->of_node = np;
 if (dev && dev->driver)
 core->owner = dev->driver->owner;
@@ -4223,12 +4350,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 hw->clk = NULL;

 fail_create_clk:
-clk_core_free_parent_map(core);
 fail_parents:
 fail_ops:
-kfree_const(core->name);
 fail_name:
-kfree(core);
+kref_put(&core->ref, __clk_release);
 fail_out:
 return ERR_PTR(ret);
 }
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(of_clk_hw_register);
|
EXPORT_SYMBOL_GPL(of_clk_hw_register);
|
||||||
|
|
||||||
/* Free memory allocated for a clock. */
|
|
||||||
static void __clk_release(struct kref *ref)
|
|
||||||
{
|
|
||||||
struct clk_core *core = container_of(ref, struct clk_core, ref);
|
|
||||||
|
|
||||||
lockdep_assert_held(&prepare_lock);
|
|
||||||
|
|
||||||
clk_core_free_parent_map(core);
|
|
||||||
kfree_const(core->name);
|
|
||||||
kfree(core);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Empty clk_ops for unregistered clocks. These are used temporarily
|
* Empty clk_ops for unregistered clocks. These are used temporarily
|
||||||
* after clk_unregister() was called on a clock and until last clock
|
* after clk_unregister() was called on a clock and until last clock
|
||||||
|
|
|
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>

 #include "clk-mtk.h"

@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
 return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
 }

+
+devm_pm_runtime_enable(&pdev->dev);
+/*
+* Do a pm_runtime_resume_and_get() to workaround a possible
+* deadlock between clk_register() and the genpd framework.
+*/
+r = pm_runtime_resume_and_get(&pdev->dev);
+if (r)
+return r;
+
 /* Calculate how many clk_hw_onecell_data entries to allocate */
 num_clks = mcd->num_clks + mcd->num_composite_clks;
 num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;

@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
 goto unregister_clks;
 }

+pm_runtime_put(&pdev->dev);
+
 return r;

 unregister_clks:

@@ -604,6 +617,8 @@ free_data:
 free_base:
 if (mcd->shared_io && base)
 iounmap(base);
+
+pm_runtime_put(&pdev->dev);
 return r;
 }

@@ -641,32 +641,21 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
 struct vmk80xx_private *devpriv = dev->private;
 struct usb_interface *intf = comedi_to_usb_interface(dev);
 struct usb_host_interface *iface_desc = intf->cur_altsetting;
-struct usb_endpoint_descriptor *ep_desc;
-int i;
+struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+int ret;

-if (iface_desc->desc.bNumEndpoints != 2)
+if (devpriv->model == VMK8061_MODEL)
+ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+&ep_tx_desc, NULL, NULL);
+else
+ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+&ep_rx_desc, &ep_tx_desc);
+
+if (ret)
 return -ENODEV;

-for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
-ep_desc = &iface_desc->endpoint[i].desc;
-
-if (usb_endpoint_is_int_in(ep_desc) ||
-usb_endpoint_is_bulk_in(ep_desc)) {
-if (!devpriv->ep_rx)
-devpriv->ep_rx = ep_desc;
-continue;
-}
-
-if (usb_endpoint_is_int_out(ep_desc) ||
-usb_endpoint_is_bulk_out(ep_desc)) {
-if (!devpriv->ep_tx)
-devpriv->ep_tx = ep_desc;
-continue;
-}
-}
-
-if (!devpriv->ep_rx || !devpriv->ep_tx)
-return -ENODEV;
+devpriv->ep_rx = ep_rx_desc;
+devpriv->ep_tx = ep_tx_desc;

 if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
 return -EINVAL;
@@ -562,7 +562,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 struct ttm_resource *mem)
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-size_t bus_size = (size_t)mem->size;

 switch (mem->mem_type) {
 case TTM_PL_SYSTEM:

@@ -573,9 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 break;
 case TTM_PL_VRAM:
 mem->bus.offset = mem->start << PAGE_SHIFT;
-/* check if it's visible */
-if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
-return -EINVAL;

 if (adev->mman.aper_base_kaddr &&
 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
@@ -1500,6 +1500,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 trace_amdgpu_vm_bo_map(bo_va, mapping);
 }

+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+struct amdgpu_bo *bo,
+uint64_t saddr,
+uint64_t offset,
+uint64_t size)
+{
+uint64_t tmp, lpfn;
+
+if (saddr & AMDGPU_GPU_PAGE_MASK
+|| offset & AMDGPU_GPU_PAGE_MASK
+|| size & AMDGPU_GPU_PAGE_MASK)
+return -EINVAL;
+
+if (check_add_overflow(saddr, size, &tmp)
+|| check_add_overflow(offset, size, &tmp)
+|| size == 0 /* which also leads to end < begin */)
+return -EINVAL;
+
+/* make sure object fit at this offset */
+if (bo && offset + size > amdgpu_bo_size(bo))
+return -EINVAL;
+
+/* Ensure last pfn not exceed max_pfn */
+lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+if (lpfn >= adev->vm_manager.max_pfn)
+return -EINVAL;
+
+return 0;
+}
+
 /**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
@@ -1526,21 +1557,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 struct amdgpu_bo *bo = bo_va->base.bo;
 struct amdgpu_vm *vm = bo_va->base.vm;
 uint64_t eaddr;
+int r;

-/* validate the parameters */
-if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-return -EINVAL;
-if (saddr + size <= saddr || offset + size <= offset)
-return -EINVAL;
-
-/* make sure object fit at this offset */
-eaddr = saddr + size - 1;
-if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-return -EINVAL;
+r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+if (r)
+return r;

 saddr /= AMDGPU_GPU_PAGE_SIZE;
-eaddr /= AMDGPU_GPU_PAGE_SIZE;
+eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
 if (tmp) {
@@ -1593,17 +1617,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 uint64_t eaddr;
 int r;

-/* validate the parameters */
-if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-return -EINVAL;
-if (saddr + size <= saddr || offset + size <= offset)
-return -EINVAL;
-
-/* make sure object fit at this offset */
-eaddr = saddr + size - 1;
-if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-return -EINVAL;
+r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+if (r)
+return r;

 /* Allocate all the needed memory */
 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);

@@ -1617,7 +1633,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 }

 saddr /= AMDGPU_GPU_PAGE_SIZE;
-eaddr /= AMDGPU_GPU_PAGE_SIZE;
+eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

 mapping->start = saddr;
 mapping->last = eaddr;
@@ -1704,10 +1720,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
 LIST_HEAD(removed);
 uint64_t eaddr;
+int r;
+
+r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+if (r)
+return r;

-eaddr = saddr + size - 1;
 saddr /= AMDGPU_GPU_PAGE_SIZE;
-eaddr /= AMDGPU_GPU_PAGE_SIZE;
+eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

 /* Allocate all the needed memory */
 before = kzalloc(sizeof(*before), GFP_KERNEL);
@@ -818,9 +818,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
 mutex_lock(&kfd_processes_mutex);

 if (kfd_is_locked()) {
-mutex_unlock(&kfd_processes_mutex);
 pr_debug("KFD is locked! Cannot create process");
-return ERR_PTR(-EINVAL);
+process = ERR_PTR(-EINVAL);
+goto out;
 }

 /* A prior open of /dev/kfd could have already created the process. */
@@ -236,9 +236,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
 OTG_V_TOTAL_MAX_SEL, 1,
 OTG_FORCE_LOCK_ON_EVENT, 0,
 OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
-// Setup manual flow control for EOF via TRIG_A
-optc->funcs->setup_manual_trigger(optc);
 }
 }

@@ -348,6 +348,12 @@ static const struct dmi_system_id orientation_data[] = {
 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
 },
 .driver_data = (void *)&lcd1200x1920_rightside_up,
+}, { /* Lenovo Legion Go 8APU1 */
+.matches = {
+DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"),
+},
+.driver_data = (void *)&lcd1600x2560_leftside_up,
 }, { /* Lenovo Yoga Book X90F / X90L */
 .matches = {
 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
@@ -259,6 +259,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
 drm_property_blob_get(crtc_state->post_csc_lut);

 crtc_state->update_pipe = false;
+crtc_state->update_m_n = false;
 crtc_state->disable_lp_wm = false;
 crtc_state->disable_cxsr = false;
 crtc_state->update_wm_pre = false;
@@ -2453,7 +2453,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
 intel_atomic_get_old_cdclk_state(state);
 const struct intel_cdclk_state *new_cdclk_state =
 intel_atomic_get_new_cdclk_state(state);
-enum pipe pipe = new_cdclk_state->pipe;
+struct intel_cdclk_config cdclk_config;
+enum pipe pipe;

 if (!intel_cdclk_changed(&old_cdclk_state->actual,
 &new_cdclk_state->actual))

@@ -2462,12 +2463,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
 if (IS_DG2(i915))
 intel_cdclk_pcode_pre_notify(state);

-if (new_cdclk_state->disable_pipes ||
-old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
-drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+if (new_cdclk_state->disable_pipes) {
+cdclk_config = new_cdclk_state->actual;
+pipe = INVALID_PIPE;
+} else {
+if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+cdclk_config = new_cdclk_state->actual;
+pipe = new_cdclk_state->pipe;
+} else {
+cdclk_config = old_cdclk_state->actual;
+pipe = INVALID_PIPE;
+}

-intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+old_cdclk_state->actual.voltage_level);
 }

+drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+intel_set_cdclk(i915, &cdclk_config, pipe);
 }

 /**

@@ -2485,7 +2499,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
 intel_atomic_get_old_cdclk_state(state);
 const struct intel_cdclk_state *new_cdclk_state =
 intel_atomic_get_new_cdclk_state(state);
-enum pipe pipe = new_cdclk_state->pipe;
+enum pipe pipe;

 if (!intel_cdclk_changed(&old_cdclk_state->actual,
 &new_cdclk_state->actual))

@@ -2495,11 +2509,14 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
 intel_cdclk_pcode_post_notify(state);

 if (!new_cdclk_state->disable_pipes &&
-old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
-drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+pipe = new_cdclk_state->pipe;
+else
+pipe = INVALID_PIPE;

-intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
-}
+drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
 }

 static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -468,9 +468,56 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
 return vblank_start;
 }

+static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
+struct intel_crtc *crtc,
+int *min, int *max, int *vblank_start)
+{
+const struct intel_crtc_state *old_crtc_state =
+intel_atomic_get_old_crtc_state(state, crtc);
+const struct intel_crtc_state *new_crtc_state =
+intel_atomic_get_new_crtc_state(state, crtc);
+const struct intel_crtc_state *crtc_state;
+const struct drm_display_mode *adjusted_mode;
+
+/*
+ * During fastsets/etc. the transcoder is still
+ * running with the old timings at this point.
+ *
+ * TODO: maybe just use the active timings here?
+ */
+if (intel_crtc_needs_modeset(new_crtc_state))
+crtc_state = new_crtc_state;
+else
+crtc_state = old_crtc_state;
+
+adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+if (intel_vrr_is_push_sent(crtc_state))
+*vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+else
+*vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+} else {
+*vblank_start = intel_mode_vblank_start(adjusted_mode);
+}
+
+/* FIXME needs to be calibrated sensibly */
+*min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+VBLANK_EVASION_TIME_US);
+*max = *vblank_start - 1;
+
+/*
+ * M/N is double buffered on the transcoder's undelayed vblank,
+ * so with seamless M/N we must evade both vblanks.
+ */
+if (new_crtc_state->update_m_n)
+*min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+}
+
 /**
 * intel_pipe_update_start() - start update of a set of display registers
-* @new_crtc_state: the new crtc state
+* @state: the atomic state
+* @crtc: the crtc
 *
 * Mark the start of an update to pipe registers that should be updated
 * atomically regarding vblank. If the next vblank will happens within
@@ -480,11 +527,12 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
 * until a subsequent call to intel_pipe_update_end(). That is done to
 * avoid random delays.
 */
-void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
+void intel_pipe_update_start(struct intel_atomic_state *state,
+struct intel_crtc *crtc)
 {
-struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+struct intel_crtc_state *new_crtc_state =
+intel_atomic_get_new_crtc_state(state, crtc);
 long timeout = msecs_to_jiffies_timeout(1);
 int scanline, min, max, vblank_start;
 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -500,27 +548,7 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
 if (intel_crtc_needs_vblank_work(new_crtc_state))
 intel_crtc_vblank_work_init(new_crtc_state);

-if (new_crtc_state->vrr.enable) {
-if (intel_vrr_is_push_sent(new_crtc_state))
-vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
-else
-vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
-} else {
-vblank_start = intel_mode_vblank_start(adjusted_mode);
-}
-
-/* FIXME needs to be calibrated sensibly */
-min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
-VBLANK_EVASION_TIME_US);
-max = vblank_start - 1;
-
-/*
- * M/N is double buffered on the transcoder's undelayed vblank,
- * so with seamless M/N we must evade both vblanks.
- */
-if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
-min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);

 if (min <= 0 || max <= 0)
 goto irq_disable;
@@ -631,15 +659,18 @@ static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}

 /**
 * intel_pipe_update_end() - end update of a set of display registers
-* @new_crtc_state: the new crtc state
+* @state: the atomic state
+* @crtc: the crtc
 *
 * Mark the end of an update started with intel_pipe_update_start(). This
 * re-enables interrupts and verifies the update was actually completed
 * before a vblank.
 */
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
+void intel_pipe_update_end(struct intel_atomic_state *state,
+struct intel_crtc *crtc)
 {
-struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+struct intel_crtc_state *new_crtc_state =
+intel_atomic_get_new_crtc_state(state, crtc);
 enum pipe pipe = crtc->pipe;
 int scanline_end = intel_get_crtc_scanline(crtc);
 u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
@@ -697,15 +728,6 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 */
 intel_vrr_send_push(new_crtc_state);

-/*
- * Seamless M/N update may need to update frame timings.
- *
- * FIXME Should be synchronized with the start of vblank somehow...
- */
-if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
-intel_crtc_update_active_timings(new_crtc_state,
-new_crtc_state->vrr.enable);
-
 local_irq_enable();

 if (intel_vgpu_active(dev_priv))
@@ -36,8 +36,10 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
 void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
-void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_start(struct intel_atomic_state *state,
+struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_atomic_state *state,
+struct intel_crtc *crtc);
 void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
 struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
 struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
@@ -5215,7 +5215,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 PIPE_CONF_CHECK_X(lane_lat_optim_mask);

 if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
-if (!fastset || !pipe_config->seamless_m_n)
+if (!fastset || !pipe_config->update_m_n)
 PIPE_CONF_CHECK_M_N(dp_m_n);
 } else {
 PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -5353,7 +5353,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
 PIPE_CONF_CHECK_I(pipe_bpp);

-if (!fastset || !pipe_config->seamless_m_n) {
+if (!fastset || !pipe_config->update_m_n) {
 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
 }
@@ -5448,6 +5448,7 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,

 crtc_state->uapi.mode_changed = true;
 crtc_state->update_pipe = false;
+crtc_state->update_m_n = false;

 ret = drm_atomic_add_affected_connectors(&state->base,
 &crtc->base);
@@ -5565,13 +5566,14 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
 {
 struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);

-if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
+if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
 drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
+else
+new_crtc_state->uapi.mode_changed = false;

-return;
-}
+if (intel_crtc_needs_modeset(new_crtc_state))
+new_crtc_state->update_m_n = false;

-new_crtc_state->uapi.mode_changed = false;
 if (!intel_crtc_needs_modeset(new_crtc_state))
 new_crtc_state->update_pipe = true;
 }
@@ -6297,6 +6299,7 @@ int intel_atomic_check(struct drm_device *dev,
 if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
 new_crtc_state->uapi.mode_changed = true;
 new_crtc_state->update_pipe = false;
+new_crtc_state->update_m_n = false;
 }
 }

@@ -6309,6 +6312,7 @@ int intel_atomic_check(struct drm_device *dev,
 if (intel_cpu_transcoders_need_modeset(state, trans)) {
 new_crtc_state->uapi.mode_changed = true;
 new_crtc_state->update_pipe = false;
+new_crtc_state->update_m_n = false;
 }
 }

@@ -6316,6 +6320,7 @@ int intel_atomic_check(struct drm_device *dev,
 if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
 new_crtc_state->uapi.mode_changed = true;
 new_crtc_state->update_pipe = false;
+new_crtc_state->update_m_n = false;
 }
 }
 }
@@ -6494,7 +6499,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
 hsw_set_linetime_wm(new_crtc_state);

-if (new_crtc_state->seamless_m_n)
+if (new_crtc_state->update_m_n)
 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
 &new_crtc_state->dp_m_n);
 }
@@ -6533,6 +6538,8 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
 struct intel_crtc *crtc)
 {
 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+const struct intel_crtc_state *old_crtc_state =
+intel_atomic_get_old_crtc_state(state, crtc);
 const struct intel_crtc_state *new_crtc_state =
 intel_atomic_get_new_crtc_state(state, crtc);

@@ -6544,6 +6551,9 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
 if (DISPLAY_VER(dev_priv) >= 9 &&
 !intel_crtc_needs_modeset(new_crtc_state))
 skl_detach_scalers(new_crtc_state);
+
+if (vrr_enabling(old_crtc_state, new_crtc_state))
+intel_vrr_enable(new_crtc_state);
 }

 static void intel_enable_crtc(struct intel_atomic_state *state,
@@ -6584,12 +6594,6 @@ static void intel_update_crtc(struct intel_atomic_state *state,
 intel_dpt_configure(crtc);
 }

-if (vrr_enabling(old_crtc_state, new_crtc_state)) {
-intel_vrr_enable(new_crtc_state);
-intel_crtc_update_active_timings(new_crtc_state,
-new_crtc_state->vrr.enable);
-}
-
 if (!modeset) {
 if (new_crtc_state->preload_luts &&
 intel_crtc_needs_color_update(new_crtc_state))
@@ -6616,7 +6620,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
 intel_crtc_planes_update_noarm(state, crtc);

 /* Perform vblank evasion around commit operation */
-intel_pipe_update_start(new_crtc_state);
+intel_pipe_update_start(state, crtc);

 commit_pipe_pre_planes(state, crtc);

@@ -6624,7 +6628,16 @@ static void intel_update_crtc(struct intel_atomic_state *state,

 commit_pipe_post_planes(state, crtc);

-intel_pipe_update_end(new_crtc_state);
+intel_pipe_update_end(state, crtc);
+
+/*
+ * VRR/Seamless M/N update may need to update frame timings.
+ *
+ * FIXME Should be synchronized with the start of vblank somehow...
+ */
+if (vrr_enabling(old_crtc_state, new_crtc_state) || new_crtc_state->update_m_n)
+intel_crtc_update_active_timings(new_crtc_state,
+new_crtc_state->vrr.enable);

 /*
 * We usually enable FIFO underrun interrupts as part of the
@@ -46,6 +46,7 @@ struct drm_printer;
 #define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
 #define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
 #define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
 #define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
 #define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
 #define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2)
@@ -1084,6 +1084,7 @@ struct intel_crtc_state {

 unsigned fb_bits; /* framebuffers to flip */
 bool update_pipe; /* can a fast modeset be performed? */
+bool update_m_n; /* update M/N seamlessly during fastset? */
 bool disable_cxsr;
 bool update_wm_pre, update_wm_post; /* watermarks are updated */
 bool fifo_changed; /* FIFO split is changed */
@@ -1196,7 +1197,6 @@ struct intel_crtc_state {
 /* m2_n2 for eDP downclock */
 struct intel_link_m_n dp_m2_n2;
 bool has_drrs;
-bool seamless_m_n;

 /* PSR is supported but might not be enabled due the lack of enabled planes */
 bool has_psr;
@@ -1310,13 +1310,14 @@ bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
 const struct intel_crtc_state *pipe_config)
 {
+struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

-/* On TGL, FEC is supported on all Pipes */
 if (DISPLAY_VER(dev_priv) >= 12)
 return true;

-if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
+if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
 return true;

 return false;
@@ -2147,8 +2148,12 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
 intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
 int pixel_clock;

-if (has_seamless_m_n(connector))
-pipe_config->seamless_m_n = true;
+/*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that when updating M/N live.
+ */
+if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
+pipe_config->update_m_n = true;

 if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
 if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
@@ -964,7 +964,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
 return 0;
 }

-if (DISPLAY_VER(dev_priv) >= 10 &&
+if (HAS_DSC_MST(dev_priv) &&
 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
 /*
 * TBD pass the connector BPC,
@@ -33,6 +33,7 @@
 #include "gt/intel_engine.h"
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_tlb.h"

@@ -102,12 +103,34 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)

 static int __i915_vma_active(struct i915_active *ref)
 {
-return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+struct i915_vma *vma = active_to_vma(ref);
+
+if (!i915_vma_tryget(vma))
+return -ENOENT;
+
+/*
+ * Exclude global GTT VMA from holding a GT wakeref
+ * while active, otherwise GPU never goes idle.
+ */
+if (!i915_vma_is_ggtt(vma))
+intel_gt_pm_get(vma->vm->gt);
+
+return 0;
 }

 static void __i915_vma_retire(struct i915_active *ref)
 {
-i915_vma_put(active_to_vma(ref));
+struct i915_vma *vma = active_to_vma(ref);
+
+if (!i915_vma_is_ggtt(vma)) {
+/*
+ * Since we can be called from atomic contexts,
+ * use an async variant of intel_gt_pm_put().
+ */
+intel_gt_pm_put_async(vma->vm->gt);
+}
+
+i915_vma_put(vma);
 }

 static struct i915_vma *
@@ -1403,7 +1426,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 struct i915_vma_work *work = NULL;
 struct dma_fence *moving = NULL;
 struct i915_vma_resource *vma_res = NULL;
-intel_wakeref_t wakeref = 0;
+intel_wakeref_t wakeref;
 unsigned int bound;
 int err;

@@ -1423,8 +1446,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 if (err)
 return err;

-if (flags & PIN_GLOBAL)
-wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+/*
+ * In case of a global GTT, we must hold a runtime-pm wakeref
+ * while global PTEs are updated. In other cases, we hold
+ * the rpm reference while the VMA is active. Since runtime
+ * resume may require allocations, which are forbidden inside
+ * vm->mutex, get the first rpm wakeref outside of the mutex.
+ */
+wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

 if (flags & vma->vm->bind_async_flags) {
 /* lock VM */
@@ -1560,8 +1589,7 @@ err_fence:
 if (work)
 dma_fence_work_commit_imm(&work->base);
 err_rpm:
-if (wakeref)
-intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

 if (moving)
 dma_fence_put(moving);
@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 .name = "sspp_0", .id = SSPP_VIG0,
 .base = 0x4000, .len = 0x1f0,
 .features = VIG_SDM845_MASK,
-.sblk = &sdm845_vig_sblk_0,
+.sblk = &sm8150_vig_sblk_0,
 .xin_id = 0,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 .name = "sspp_1", .id = SSPP_VIG1,
 .base = 0x6000, .len = 0x1f0,
 .features = VIG_SDM845_MASK,
-.sblk = &sdm845_vig_sblk_1,
+.sblk = &sm8150_vig_sblk_1,
 .xin_id = 4,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -93,7 +93,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 .name = "sspp_2", .id = SSPP_VIG2,
 .base = 0x8000, .len = 0x1f0,
 .features = VIG_SDM845_MASK,
-.sblk = &sdm845_vig_sblk_2,
+.sblk = &sm8150_vig_sblk_2,
 .xin_id = 8,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -101,7 +101,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 .name = "sspp_3", .id = SSPP_VIG3,
 .base = 0xa000, .len = 0x1f0,
 .features = VIG_SDM845_MASK,
-.sblk = &sdm845_vig_sblk_3,
+.sblk = &sm8150_vig_sblk_3,
 .xin_id = 12,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -76,7 +76,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 .name = "sspp_0", .id = SSPP_VIG0,
 .base = 0x4000, .len = 0x1f0,
 .features = VIG_SDM845_MASK,
-.sblk = &sdm845_vig_sblk_0,
+.sblk = &sm8150_vig_sblk_0,
 .xin_id = 0,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -84,7 +84,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 .name = "sspp_1", .id = SSPP_VIG1,
 .base = 0x6000, .len = 0x1f0,
 .features = VIG_SDM845_MASK,
-.sblk = &sdm845_vig_sblk_1,
+.sblk = &sm8150_vig_sblk_1,
 .xin_id = 4,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -92,7 +92,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 .name = "sspp_2", .id = SSPP_VIG2,
 .base = 0x8000, .len = 0x1f0,
 .features = VIG_SDM845_MASK,
-.sblk = &sdm845_vig_sblk_2,
+.sblk = &sm8150_vig_sblk_2,
 .xin_id = 8,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -100,7 +100,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 .name = "sspp_3", .id = SSPP_VIG3,
 .base = 0xa000, .len = 0x1f0,
 .features = VIG_SDM845_MASK,
-.sblk = &sdm845_vig_sblk_3,
+.sblk = &sm8150_vig_sblk_3,
 .xin_id = 12,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 .name = "sspp_0", .id = SSPP_VIG0,
 .base = 0x4000, .len = 0x32c,
 .features = VIG_SC7180_MASK,
-.sblk = &sm8250_vig_sblk_0,
+.sblk = &sm8450_vig_sblk_0,
 .xin_id = 0,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 .name = "sspp_1", .id = SSPP_VIG1,
 .base = 0x6000, .len = 0x32c,
 .features = VIG_SC7180_MASK,
-.sblk = &sm8250_vig_sblk_1,
+.sblk = &sm8450_vig_sblk_1,
 .xin_id = 4,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -93,7 +93,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 .name = "sspp_2", .id = SSPP_VIG2,
 .base = 0x8000, .len = 0x32c,
 .features = VIG_SC7180_MASK,
-.sblk = &sm8250_vig_sblk_2,
+.sblk = &sm8450_vig_sblk_2,
 .xin_id = 8,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -101,7 +101,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 .name = "sspp_3", .id = SSPP_VIG3,
 .base = 0xa000, .len = 0x32c,
 .features = VIG_SC7180_MASK,
-.sblk = &sm8250_vig_sblk_3,
+.sblk = &sm8450_vig_sblk_3,
 .xin_id = 12,
 .type = SSPP_TYPE_VIG,
 .clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -250,14 +250,17 @@ static const uint32_t wb2_formats[] = {
 * SSPP sub blocks config
 *************************************************************/

+#define SSPP_SCALER_VER(maj, min) (((maj) << 16) | (min))
+
 /* SSPP common configuration */
-#define _VIG_SBLK(sdma_pri, qseed_ver) \
+#define _VIG_SBLK(sdma_pri, qseed_ver, scaler_ver) \
 { \
 .maxdwnscale = MAX_DOWNSCALE_RATIO, \
 .maxupscale = MAX_UPSCALE_RATIO, \
 .smart_dma_priority = sdma_pri, \
 .scaler_blk = {.name = "scaler", \
 .id = qseed_ver, \
+.version = scaler_ver, \
 .base = 0xa00, .len = 0xa0,}, \
 .csc_blk = {.name = "csc", \
 .id = DPU_SSPP_CSC_10BIT, \
@@ -269,13 +272,14 @@ static const uint32_t wb2_formats[] = {
 .rotation_cfg = NULL, \
 }

-#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \
+#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, scaler_ver, rot_cfg) \
 { \
 .maxdwnscale = MAX_DOWNSCALE_RATIO, \
 .maxupscale = MAX_UPSCALE_RATIO, \
 .smart_dma_priority = sdma_pri, \
 .scaler_blk = {.name = "scaler", \
 .id = qseed_ver, \
+.version = scaler_ver, \
 .base = 0xa00, .len = 0xa0,}, \
 .csc_blk = {.name = "csc", \
 .id = DPU_SSPP_CSC_10BIT, \
@@ -299,13 +303,17 @@ static const uint32_t wb2_formats[] = {
 }

 static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
-_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
+_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 2));
 static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
-_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
+_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 2));
 static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
-_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
+_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 2));
 static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
-_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
+_VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 2));

 static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
 .rot_maxheight = 1088,
@@ -314,13 +322,30 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
 };

 static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
-_VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3);
+_VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 3));
 static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
-_VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3);
+_VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 3));
 static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
-_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3);
+_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 3));
 static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
-_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3);
+_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 3));
+
+static const struct dpu_sspp_sub_blks sm8150_vig_sblk_0 =
+_VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 4));
+static const struct dpu_sspp_sub_blks sm8150_vig_sblk_1 =
+_VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 4));
+static const struct dpu_sspp_sub_blks sm8150_vig_sblk_2 =
+_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 4));
+static const struct dpu_sspp_sub_blks sm8150_vig_sblk_3 =
+_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3,
+SSPP_SCALER_VER(1, 4));

 static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
 static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
@@ -328,34 +353,60 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
 static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);

 static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
-_VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 0));

 static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
-_VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
+_VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 0),
+&dpu_rot_sc7280_cfg_v2);

 static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
-_VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 0));

 static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
-_VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE);
+_VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE,
+SSPP_SCALER_VER(2, 4));

 static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
-_VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 0));
 static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
-_VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 0));
 static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
-_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 0));
 static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
-_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 0));
+
+static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 =
+_VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 1));
+static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 =
+_VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 1));
+static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 =
+_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 1));
+static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 =
+_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 1));

 static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
-_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 2));
 static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
-_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 2));
 static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
-_VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 2));
 static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
-_VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4);
+_VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4,
+SSPP_SCALER_VER(3, 2));
 static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
 static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
@@ -269,7 +269,8 @@ enum {
 /**
 * struct dpu_scaler_blk: Scaler information
 * @info: HW register and features supported by this sub-blk
-* @version: qseed block revision
+* @version: qseed block revision, on QSEED3+ platforms this is the value of
+* scaler_blk.base + QSEED3_HW_VERSION registers.
 */
 struct dpu_scaler_blk {
 DPU_HW_SUBBLK_INFO;
@@ -23,6 +23,7 @@
 */

 #include "nouveau_drv.h"
+#include "nouveau_bios.h"
 #include "nouveau_reg.h"
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
@@ -1675,7 +1676,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 */
 if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
 if (*conn == 0xf2005014 && *conf == 0xffffffff) {
-fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
 return false;
 }
 }
@@ -1761,26 +1762,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #ifdef __powerpc__
 /* Apple iMac G4 NV17 */
 if (of_machine_is_compatible("PowerMac4,5")) {
-fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
-fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
 return;
 }
 #endif

 /* Make up some sane defaults */
 fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
-bios->legacy.i2c_indices.crt, 1, 1);
+bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);

 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
 fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
 bios->legacy.i2c_indices.tv,
-all_heads, 0);
+all_heads, DCB_OUTPUT_A);

 else if (bios->tmds.output0_script_ptr ||
 bios->tmds.output1_script_ptr)
 fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
 bios->legacy.i2c_indices.panel,
-all_heads, 1);
+all_heads, DCB_OUTPUT_B);
 }

 static int
@@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 void __iomem *map = NULL;

 /* Already mapped? */
-if (refcount_inc_not_zero(&iobj->maps))
+if (refcount_inc_not_zero(&iobj->maps)) {
+/* read barrier match the wmb on refcount set */
+smp_rmb();
 return iobj->map;
+}

 /* Take the lock, and re-check that another thread hasn't
 * already mapped the object in the meantime.
@@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 iobj->base.memory.ptrs = &nv50_instobj_fast;
 else
 iobj->base.memory.ptrs = &nv50_instobj_slow;
+/* barrier to ensure the ptrs are written before refcount is set */
+smp_wmb();
 refcount_set(&iobj->maps, 1);
 }
@@ -261,8 +261,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
 struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);

 mipi_dsi_detach(ctx->dsi);
-mipi_dsi_device_unregister(ctx->dsi);
-
 drm_panel_remove(&ctx->panel);
 }
@@ -922,8 +922,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;

 for (i = 0; i < max_device; i++) {
-ATOM_CONNECTOR_INFO_I2C ci =
-supported_devices->info.asConnInfo[i];
+ATOM_CONNECTOR_INFO_I2C ci;
+
+if (frev > 1)
+ci = supported_devices->info_2d1.asConnInfo[i];
+else
+ci = supported_devices->info.asConnInfo[i];

 bios_connectors[i].valid = false;
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 .no_wait_gpu = false
 };
 u32 j, initial_line = dst_offset / dst_stride;
-struct vmw_bo_blit_line_data d;
+struct vmw_bo_blit_line_data d = {0};
 int ret = 0;
+struct page **dst_pages = NULL;
+struct page **src_pages = NULL;

 /* Buffer objects need to be either pinned or reserved: */
 if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 return ret;
 }

+if (!src->ttm->pages && src->ttm->sg) {
+src_pages = kvmalloc_array(src->ttm->num_pages,
+sizeof(struct page *), GFP_KERNEL);
+if (!src_pages)
+return -ENOMEM;
+ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+src->ttm->num_pages);
+if (ret)
+goto out;
+}
+if (!dst->ttm->pages && dst->ttm->sg) {
+dst_pages = kvmalloc_array(dst->ttm->num_pages,
+sizeof(struct page *), GFP_KERNEL);
+if (!dst_pages) {
+ret = -ENOMEM;
+goto out;
+}
+ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+dst->ttm->num_pages);
+if (ret)
+goto out;
+}
+
 d.mapped_dst = 0;
 d.mapped_src = 0;
 d.dst_addr = NULL;
 d.src_addr = NULL;
-d.dst_pages = dst->ttm->pages;
-d.src_pages = src->ttm->pages;
+d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
 d.dst_num_pages = PFN_UP(dst->resource->size);
 d.src_num_pages = PFN_UP(src->resource->size);
 d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
 kunmap_atomic(d.src_addr);
 if (d.dst_addr)
 kunmap_atomic(d.dst_addr);
+if (src_pages)
+kvfree(src_pages);
+if (dst_pages)
+kvfree(dst_pages);

 return ret;
 }
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
 {
 struct ttm_operation_ctx ctx = {
 .interruptible = params->bo_type != ttm_bo_type_kernel,
-.no_wait_gpu = false
+.no_wait_gpu = false,
+.resv = params->resv,
 };
 struct ttm_device *bdev = &dev_priv->bdev;
 struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,

 vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
 ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
-&vmw_bo->placement, 0, &ctx, NULL,
-NULL, destroy);
+&vmw_bo->placement, 0, &ctx,
+params->sg, params->resv, destroy);
 if (unlikely(ret))
 return ret;
@@ -55,6 +55,8 @@ struct vmw_bo_params {
 enum ttm_bo_type bo_type;
 size_t size;
 bool pin;
+struct dma_resv *resv;
+struct sg_table *sg;
 };

 /**
@@ -1628,6 +1628,7 @@ static const struct drm_driver driver = {

 .prime_fd_to_handle = vmw_prime_fd_to_handle,
 .prime_handle_to_fd = vmw_prime_handle_to_fd,
+.gem_prime_import_sg_table = vmw_prime_import_sg_table,

 .fops = &vmwgfx_driver_fops,
 .name = VMWGFX_DRIVER_NAME,
@@ -1131,6 +1131,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
 struct drm_file *file_priv,
 uint32_t handle, uint32_t flags,
 int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+struct dma_buf_attachment *attach,
+struct sg_table *table);

 /*
 * MemoryOBject management - vmwgfx_mob.c
@@ -149,6 +149,38 @@ out_no_bo:
 return ret;
 }

+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+struct dma_buf_attachment *attach,
+struct sg_table *table)
+{
+int ret;
+struct vmw_private *dev_priv = vmw_priv(dev);
+struct drm_gem_object *gem = NULL;
+struct vmw_bo *vbo;
+struct vmw_bo_params params = {
+.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+.busy_domain = VMW_BO_DOMAIN_SYS,
+.bo_type = ttm_bo_type_sg,
+.size = attach->dmabuf->size,
+.pin = false,
+.resv = attach->dmabuf->resv,
+.sg = table,
+
+};
+
+dma_resv_lock(params.resv, NULL);
+
+ret = vmw_bo_create(dev_priv, &params, &vbo);
+if (ret != 0)
+goto out_no_bo;
+
+vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+gem = &vbo->tbo.base;
+out_no_bo:
+dma_resv_unlock(params.resv);
+return gem;
+}
+
 int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
 struct drm_file *filp)
@@ -926,6 +926,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 struct drm_atomic_state *state)
 {
+struct vmw_private *vmw = vmw_priv(crtc->dev);
 struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
 crtc);
 struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -933,9 +934,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 bool has_primary = new_state->plane_mask &
 drm_plane_mask(crtc->primary);

-/* We always want to have an active plane with an active CRTC */
-if (has_primary != new_state->enable)
-return -EINVAL;
+/*
+ * This is fine in general, but broken userspace might expect
+ * some actual rendering so give a clue as why it's blank.
+ */
+if (new_state->enable && !has_primary)
+drm_dbg_driver(&vmw->drm,
+"CRTC without a primary plane will be blank.\n");
+

 if (new_state->connector_mask != connector_mask &&
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
 
 
 static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
-	DRM_FORMAT_XRGB1555,
-	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB1555,
 };
 
 static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
 			   int fd, u32 *handle)
 {
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
 
-	return ttm_prime_fd_to_handle(tfile, fd, handle);
+	if (ret)
+		ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+	return ret;
 }
 
 int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
 			   int *prime_fd)
 {
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+	int ret;
+
+	if (handle > VMWGFX_NUM_MOB)
+		ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+	else
+		ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+	return ret;
 }
@@ -220,13 +220,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 	switch (dev_priv->map_mode) {
 	case vmw_dma_map_bind:
 	case vmw_dma_map_populate:
-		vsgt->sgt = &vmw_tt->sgt;
-		ret = sg_alloc_table_from_pages_segment(
-			&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
-			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
-			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
-		if (ret)
-			goto out_sg_alloc_fail;
+		if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+			vsgt->sgt = vmw_tt->dma_ttm.sg;
+		} else {
+			vsgt->sgt = &vmw_tt->sgt;
+			ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+				vsgt->pages, vsgt->num_pages, 0,
+				(unsigned long)vsgt->num_pages << PAGE_SHIFT,
+				dma_get_max_seg_size(dev_priv->drm.dev),
+				GFP_KERNEL);
+			if (ret)
+				goto out_sg_alloc_fail;
+		}
 
 		ret = vmw_ttm_map_for_dma(vmw_tt);
 		if (unlikely(ret != 0))
@@ -241,8 +246,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 	return 0;
 
 out_map_fail:
-	sg_free_table(vmw_tt->vsgt.sgt);
-	vmw_tt->vsgt.sgt = NULL;
+	drm_warn(&dev_priv->drm, "VSG table map failed!");
+	sg_free_table(vsgt->sgt);
+	vsgt->sgt = NULL;
 out_sg_alloc_fail:
 	return ret;
 }
@@ -388,15 +394,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_device *bdev,
 			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	int ret;
+	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
 
-	/* TODO: maybe completely drop this ? */
 	if (ttm_tt_is_populated(ttm))
 		return 0;
 
-	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+	if (external && ttm->sg)
+		return drm_prime_sg_to_dma_addr_array(ttm->sg,
+						      ttm->dma_address,
+						      ttm->num_pages);
 
-	return ret;
+	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
 }
 
 static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -404,6 +412,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 {
 	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
 						 dma_ttm);
+	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+	if (external)
+		return;
 
 	vmw_ttm_unbind(bdev, ttm);
 
@@ -422,6 +434,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 {
 	struct vmw_ttm_tt *vmw_be;
 	int ret;
+	bool external = bo->type == ttm_bo_type_sg;
 
 	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
 	if (!vmw_be)
@@ -430,7 +443,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 	vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
 	vmw_be->mob = NULL;
 
-	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+	if (external)
+		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
 		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
 				     ttm_cached);
 	else
@@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
 	}
 }
 
-static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+						enum ib_cm_state old_state)
 {
 	struct cm_id_private *cm_id_priv;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
-	       cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
+	pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+	       cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
 }
 
 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 {
 	struct cm_id_private *cm_id_priv;
+	enum ib_cm_state old_state;
 	struct cm_work *work;
 	int ret;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irq(&cm_id_priv->lock);
+	old_state = cm_id->state;
 retest:
 	switch (cm_id->state) {
 	case IB_CM_LISTEN:
@@ -1151,7 +1154,7 @@ retest:
 					   msecs_to_jiffies(
 						   CM_DESTROY_ID_WAIT_TIMEOUT));
 		if (!ret) /* timeout happened */
-			cm_destroy_id_wait_timeout(cm_id);
+			cm_destroy_id_wait_timeout(cm_id, old_state);
 	} while (!ret);
 
 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
 		mdev = dev->mdev;
 		mdev_port_num = 1;
 	}
-	if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+	if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+	    !mlx5_core_mp_enabled(mdev)) {
 		/* set local port to one for Function-Per-Port HCA. */
 		mdev = dev->mdev;
 		mdev_port_num = 1;
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
 
 	if (rxe->tfm)
 		crypto_free_shash(rxe->tfm);
+
+	mutex_destroy(&rxe->usdev_lock);
 }
 
 /* initialize rxe device parameters */
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
 
 	path->num_nodes = num_nodes;
 
+	mutex_lock(&icc_bw_lock);
+
 	for (i = num_nodes - 1; i >= 0; i--) {
 		node->provider->users++;
 		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
 		node = node->reverse;
 	}
 
+	mutex_unlock(&icc_bw_lock);
+
 	return path;
 }
 
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
 		pr_err("%s: error (%d)\n", __func__, ret);
 
 	mutex_lock(&icc_lock);
+	mutex_lock(&icc_bw_lock);
+
 	for (i = 0; i < path->num_nodes; i++) {
 		node = path->reqs[i].node;
 		hlist_del(&path->reqs[i].req_node);
 		if (!WARN_ON(!node->provider->users))
 			node->provider->users--;
 	}
+
+	mutex_unlock(&icc_bw_lock);
 	mutex_unlock(&icc_lock);
 
 	kfree_const(path->name);
@@ -2648,9 +2648,14 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
 		return -EBUSY;
 
 	/*
-	 * Start with count 1, driver can increase it in queue_setup()
+	 * Start with q->min_buffers_needed + 1, driver can increase it in
+	 * queue_setup()
+	 *
+	 * 'min_buffers_needed' buffers need to be queued up before you
+	 * can start streaming, plus 1 for userspace (or in this case,
+	 * kernelspace) processing.
 	 */
-	count = 1;
+	count = max(2, q->min_buffers_needed + 1);
 
 	dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
 		(read) ? "read" : "write", count, q->fileio_read_once,
@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
 
-	{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
@@ -1948,14 +1948,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
 
 static int mt753x_mirror_port_get(unsigned int id, u32 val)
 {
-	return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
-				   MIRROR_PORT(val);
+	return (id == ID_MT7531 || id == ID_MT7988) ?
+		       MT7531_MIRROR_PORT_GET(val) :
+		       MIRROR_PORT(val);
 }
 
 static int mt753x_mirror_port_set(unsigned int id, u32 val)
 {
-	return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
-				   MIRROR_PORT(val);
+	return (id == ID_MT7531 || id == ID_MT7988) ?
+		       MT7531_MIRROR_PORT_SET(val) :
+		       MIRROR_PORT(val);
 }
 
 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
@@ -2470,8 +2472,6 @@ mt7530_setup(struct dsa_switch *ds)
 		     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
 		     SYS_CTRL_REG_RST);
 
-	mt7530_pll_setup(priv);
-
 	/* Lower Tx driving for TRGMII path */
 	for (i = 0; i < NUM_TRGMII_CTRL; i++)
 		mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
@@ -2489,6 +2489,9 @@ mt7530_setup(struct dsa_switch *ds)
 
 	priv->p6_interface = PHY_INTERFACE_MODE_NA;
 
+	if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
+		mt7530_pll_setup(priv);
+
 	mt753x_trap_frames(priv);
 
 	/* Enable and reset MIB counters */
@@ -2518,6 +2521,9 @@ mt7530_setup(struct dsa_switch *ds)
 			   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
 	}
 
+	/* Allow mirroring frames received on the local port (monitor port). */
+	mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
 	/* Setup VLAN ID 0 for VLAN-unaware bridges */
 	ret = mt7530_setup_vlan0(priv);
 	if (ret)
@@ -2626,6 +2632,9 @@ mt7531_setup_common(struct dsa_switch *ds)
 			   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
 	}
 
+	/* Allow mirroring frames received on the local port (monitor port). */
+	mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
 	/* Flush the FDB table */
 	ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
 	if (ret < 0)
@@ -2704,18 +2713,25 @@ mt7531_setup(struct dsa_switch *ds)
 	priv->p5_interface = PHY_INTERFACE_MODE_NA;
 	priv->p6_interface = PHY_INTERFACE_MODE_NA;
 
-	/* Enable PHY core PLL, since phy_device has not yet been created
-	 * provided for phy_[read,write]_mmd_indirect is called, we provide
-	 * our own mt7531_ind_mmd_phy_[read,write] to complete this
-	 * function.
+	/* Enable Energy-Efficient Ethernet (EEE) and PHY core PLL, since
+	 * phy_device has not yet been created provided for
+	 * phy_[read,write]_mmd_indirect is called, we provide our own
+	 * mt7531_ind_mmd_phy_[read,write] to complete this function.
 	 */
 	val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
 				      MDIO_MMD_VEND2, CORE_PLL_GROUP4);
-	val |= MT7531_PHY_PLL_BYPASS_MODE;
+	val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
 	val &= ~MT7531_PHY_PLL_OFF;
 	mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
 				 CORE_PLL_GROUP4, val);
 
+	/* Disable EEE advertisement on the switch PHYs. */
+	for (i = MT753X_CTRL_PHY_ADDR;
+	     i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+		mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+					 0);
+	}
+
 	mt7531_setup_common(ds);
 
 	/* Setup VLAN ID 0 for VLAN-unaware bridges */
@@ -32,6 +32,10 @@ enum mt753x_id {
 #define SYSC_REG_RSTCTRL		0x34
 #define  RESET_MCM			BIT(2)
 
+/* Register for ARL global control */
+#define MT753X_AGC			0xc
+#define  LOCAL_EN			BIT(7)
+
 /* Registers to mac forward control for unknown frames */
 #define MT7530_MFC			0x10
 #define  BC_FFP(x)			(((x) & 0xff) << 24)
@@ -630,6 +634,7 @@ enum mt7531_clk_skew {
 #define  RG_SYSPLL_DDSFBK_EN		BIT(12)
 #define  RG_SYSPLL_BIAS_EN		BIT(11)
 #define  RG_SYSPLL_BIAS_LPF_EN		BIT(10)
+#define  MT7531_RG_SYSPLL_DMY2		BIT(6)
 #define  MT7531_PHY_PLL_OFF		BIT(5)
 #define  MT7531_PHY_PLL_BYPASS_MODE	BIT(4)
 
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
 	 * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
 	 * - Tunnel flag (present if tunnel)
 	 */
+	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+		lkups_cnt++;
+
 	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
 		lkups_cnt++;
@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 	/* Always add direction metadata */
 	ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
 
+	if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+		ice_rule_add_src_vsi_metadata(&list[i]);
+		i++;
+	}
+
 	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
 	if (tc_fltr->tunnel_type != TNL_LAST) {
 		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
@@ -731,7 +738,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 	int ret;
 	int i;
 
-	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+	if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
 		return -EOPNOTSUPP;
 	}
@@ -779,6 +786,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 
 	/* specify the cookie as filter_rule_id */
 	rule_info.fltr_rule_id = fltr->cookie;
+	rule_info.src_vsi = vsi->idx;
 
 	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
 	if (ret == -EEXIST) {
@@ -1440,7 +1448,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 	      (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
 	       BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
 	       BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
-	       BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+	       BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+	       BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+	       BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+	       BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
 		return -EOPNOTSUPP;
 	} else {
@@ -588,6 +588,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
 		struct flow_match_control match;
+		u32 val;
 
 		flow_rule_match_control(rule, &match);
 		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -596,12 +597,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 		}
 
 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+			val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
 			if (ntohs(flow_spec->etype) == ETH_P_IP) {
-				flow_spec->ip_flag = IPV4_FLAG_MORE;
+				flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
 				flow_mask->ip_flag = IPV4_FLAG_MORE;
 				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
 			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
-				flow_spec->next_header = IPPROTO_FRAGMENT;
+				flow_spec->next_header = val ?
+							 IPPROTO_FRAGMENT : 0;
 				flow_mask->next_header = 0xff;
 				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
 			} else {
@@ -598,13 +598,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
 static void
 mtk_wed_stop(struct mtk_wed_device *dev)
 {
+	mtk_wed_dma_disable(dev);
 	mtk_wed_set_ext_int(dev, false);
 
 	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
 	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
 	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
 	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
-	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
 
 	if (dev->hw->version == 1)
 		return;
@@ -617,7 +617,6 @@ static void
 mtk_wed_deinit(struct mtk_wed_device *dev)
 {
 	mtk_wed_stop(dev);
-	mtk_wed_dma_disable(dev);
 
 	wed_clr(dev, MTK_WED_CTRL,
 		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@@ -1703,9 +1702,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
 static void
 mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
 {
-	if (!dev->running)
-		return;
-
 	mtk_wed_set_ext_int(dev, !!mask);
 	wed_w32(dev, MTK_WED_INT_MASK, mask);
 }
@@ -45,6 +45,10 @@ struct arfs_table {
 	struct hlist_head		 rules_hash[ARFS_HASH_SIZE];
 };
 
+enum {
+	MLX5E_ARFS_STATE_ENABLED,
+};
+
 enum arfs_type {
 	ARFS_IPV4_TCP,
 	ARFS_IPV6_TCP,
@@ -59,6 +63,7 @@ struct mlx5e_arfs_tables {
 	spinlock_t                     arfs_lock;
 	int                            last_filter_id;
 	struct workqueue_struct        *wq;
+	unsigned long                  state;
 };
 
 struct arfs_tuple {
@@ -169,6 +174,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
 			return err;
 		}
 	}
+	set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
 	return 0;
 }
 
@@ -454,6 +461,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
 	int i;
 	int j;
 
+	clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
 	spin_lock_bh(&arfs->arfs_lock);
 	mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
 		hlist_del_init(&rule->hlist);
@@ -626,17 +635,8 @@ static void arfs_handle_work(struct work_struct *work)
 	struct mlx5_flow_handle *rule;
 
 	arfs = mlx5e_fs_get_arfs(priv->fs);
-	mutex_lock(&priv->state_lock);
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		spin_lock_bh(&arfs->arfs_lock);
-		hlist_del(&arfs_rule->hlist);
-		spin_unlock_bh(&arfs->arfs_lock);
-
-		mutex_unlock(&priv->state_lock);
-		kfree(arfs_rule);
-		goto out;
-	}
-	mutex_unlock(&priv->state_lock);
+	if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+		return;
 
 	if (!arfs_rule->rule) {
 		rule = arfs_add_rule(priv, arfs_rule);
@@ -752,6 +752,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		return -EPROTONOSUPPORT;
 
 	spin_lock_bh(&arfs->arfs_lock);
+	if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+		spin_unlock_bh(&arfs->arfs_lock);
+		return -EPERM;
+	}
+
 	arfs_rule = arfs_find_rule(arfs_t, &fk);
 	if (arfs_rule) {
 		if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
@@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	if (err)
 		goto abort;
 
+	dev->priv.eswitch = esw;
 	err = esw_offloads_init(esw);
 	if (err)
 		goto reps_err;
@@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
 	else
 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
-	if (MLX5_ESWITCH_MANAGER(dev) &&
-	    mlx5_esw_vport_match_metadata_supported(esw))
-		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
-	dev->priv.eswitch = esw;
 	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
 
 	esw_info(dev,
@@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 
 reps_err:
 	mlx5_esw_vports_cleanup(esw);
+	dev->priv.eswitch = NULL;
 abort:
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
@@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
 	esw_info(esw->dev, "cleanup\n");
 
-	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
 	WARN_ON(refcount_read(&esw->qos.refcnt));
 	mutex_destroy(&esw->state_lock);
@@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	mutex_destroy(&esw->offloads.encap_tbl_lock);
 	mutex_destroy(&esw->offloads.decap_tbl_lock);
 	esw_offloads_cleanup(esw);
+	esw->dev->priv.eswitch = NULL;
 	mlx5_esw_vports_cleanup(esw);
 	debugfs_remove_recursive(esw->debugfs_root);
 	devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
@@ -2476,6 +2476,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
 	if (err)
 		return err;
 
+	if (MLX5_ESWITCH_MANAGER(esw->dev) &&
+	    mlx5_esw_vport_match_metadata_supported(esw))
+		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
 	err = devl_params_register(priv_to_devlink(esw->dev),
 				   esw_devlink_params,
 				   ARRAY_SIZE(esw_devlink_params));
@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 		return err;
 	}
 
-	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
 		mlx5_lag_port_sel_destroy(ldev);
+		ldev->buckets = 1;
+	}
 	if (mlx5_lag_has_drop_rule(ldev))
 		mlx5_lag_drop_rule_cleanup(ldev);
 
@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
 	u16 l3_proto; /* protocol specified in the template */
 };
 
+/* SparX-5 VCAP fragment types:
+ * 0 = no fragment, 1 = initial fragment,
+ * 2 = suspicious fragment, 3 = valid follow-up fragment
+ */
+enum {                   /* key / mask */
+	FRAG_NOT   = 0x03, /*  0 / 3  */
+	FRAG_SOME  = 0x11, /*  1 / 1  */
+	FRAG_FIRST = 0x13, /*  1 / 3  */
+	FRAG_LATER = 0x33, /*  3 / 3  */
+	FRAG_INVAL = 0xff, /* invalid */
+};
+
+/* Flower fragment flag to VCAP fragment type mapping */
+static const u8 sparx5_vcap_frag_map[4][4] = {		  /* is_frag */
+	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
+	{ FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
+	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
+	{ FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST }  /* 1/1 */
+	/*    0/0       0/1          1/0       1/1  <-- first_frag */
+};
+
 static int
 sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
 {
@@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
 	flow_rule_match_control(st->frule, &mt);
 
 	if (mt.mask->flags) {
-		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
-			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
-				value = 1; /* initial fragment */
-				mask = 0x3;
-			} else {
-				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
-					value = 3; /* follow up fragment */
-					mask = 0x3;
-				} else {
-					value = 0; /* no fragment */
-					mask = 0x3;
-				}
-			}
-		} else {
-			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
-				value = 3; /* follow up fragment */
-				mask = 0x3;
-			} else {
-				value = 0; /* no fragment */
-				mask = 0x3;
-			}
-		}
+		u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
+		u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
+		u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
+
+		u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
+		u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
+		u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
+
+		/* Lookup verdict based on the 2 + 2 input bits */
+		u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
+
+		if (vdt == FRAG_INVAL) {
+			NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+					   "Match on invalid fragment flag combination");
+			return -EINVAL;
+		}
+
+		/* Extract VCAP fragment key and mask from verdict */
+		value = (vdt >> 4) & 0x3;
+		mask = vdt & 0x3;
 
 		err = vcap_rule_add_key_u32(st->vrule,
 					    VCAP_KF_L3_FRAGMENT_TYPE,
 					    value, mask);
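As an aside, the fragment handling above reduces to a small lookup: each flower flag contributes a (key, mask) bit pair, the two pairs form 2-bit row/column indices, and each table entry packs the VCAP key in its upper nibble and the VCAP mask in its lower nibble. A minimal user-space sketch of that encoding follows; the idx() helper and main() are illustrative scaffolding, and only the enum values and table contents come from the driver code above.

#include <stdio.h>

/* Values copied from the sparx5 hunk: upper nibble = VCAP key,
 * lower nibble = VCAP mask, 0xff marks an unsupported combination.
 */
enum {
	FRAG_NOT   = 0x03,
	FRAG_SOME  = 0x11,
	FRAG_FIRST = 0x13,
	FRAG_LATER = 0x33,
	FRAG_INVAL = 0xff,
};

static const unsigned char frag_map[4][4] = {
	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST },
	{ FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL },
	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL },
	{ FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST },
};

/* Hypothetical helper: build the same 2-bit index the driver builds
 * from a flag's key bit and mask bit.
 */
static unsigned int idx(int key_bit, int mask_bit)
{
	return ((unsigned int)!!key_bit << 1) | (unsigned int)!!mask_bit;
}

int main(void)
{
	/* Example: IS_FRAGMENT key=1/mask=1, FIRST_FRAG key=0/mask=1 */
	unsigned char vdt = frag_map[idx(1, 1)][idx(0, 1)];

	if (vdt == FRAG_INVAL) {
		printf("invalid fragment flag combination\n");
		return 1;
	}
	/* Same extraction as the driver: key from the upper nibble,
	 * mask from the lower nibble (two bits each).
	 */
	printf("VCAP key=%u mask=%u\n", (vdt >> 4) & 0x3, vdt & 0x3);
	return 0;
}

With the example inputs this selects FRAG_LATER (key 3, mask 3), i.e. the "follow-up fragment" match that the removed nested if/else chain used to produce for the same flags.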
@@ -550,6 +550,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
 extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
 
 struct mac_link {
+	u32 caps;
 	u32 speed_mask;
 	u32 speed10;
 	u32 speed100;
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
 
 	priv->dev->priv_flags |= IFF_UNICAST_FLT;
 
+	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+			 MAC_10 | MAC_100 | MAC_1000;
 	/* The loopback bit seems to be re-set when link change
 	 * Simply mask it each time
 	 * Speed 10/100/1000 are set in BIT(2)/BIT(3)
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
 	if (mac->multicast_filter_bins)
 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+			 MAC_10 | MAC_100 | MAC_1000;
 	mac->link.duplex = GMAC_CONTROL_DM;
 	mac->link.speed10 = GMAC_CONTROL_PS;
 	mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
 	dev_info(priv->device, "\tDWMAC100\n");
 
 	mac->pcsr = priv->ioaddr;
+	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+			 MAC_10 | MAC_100;
 	mac->link.duplex = MAC_CONTROL_F;
 	mac->link.speed10 = 0;
 	mac->link.speed100 = 0;
@@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
 
 static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
 {
-	priv->phylink_config.mac_capabilities |= MAC_2500FD;
+	if (priv->plat->tx_queues_to_use > 1)
+		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+	else
+		priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
 }
 
 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -1347,6 +1350,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
 	if (mac->multicast_filter_bins)
 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+			 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
 	mac->link.duplex = GMAC_CONFIG_DM;
 	mac->link.speed10 = GMAC_CONFIG_PS;
 	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
 	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
 }
 
-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
-{
-	priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
-						 MAC_10000FD | MAC_25000FD |
-						 MAC_40000FD | MAC_50000FD |
-						 MAC_100000FD;
-}
-
 static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
 {
 	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -1591,7 +1583,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
 
 const struct stmmac_ops dwxgmac210_ops = {
 	.core_init = dwxgmac2_core_init,
-	.phylink_get_caps = xgmac_phylink_get_caps,
 	.set_mac = dwxgmac2_set_mac,
 	.rx_ipc = dwxgmac2_rx_ipc,
 	.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1653,7 +1644,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
 
 const struct stmmac_ops dwxlgmac2_ops = {
 	.core_init = dwxgmac2_core_init,
-	.phylink_get_caps = xgmac_phylink_get_caps,
 	.set_mac = dwxgmac2_set_mac,
 	.rx_ipc = dwxgmac2_rx_ipc,
 	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1714,6 +1704,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
 	if (mac->multicast_filter_bins)
 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+			 MAC_1000FD | MAC_2500FD | MAC_5000FD |
+			 MAC_10000FD;
 	mac->link.duplex = 0;
 	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
 	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1751,6 +1744,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
 	if (mac->multicast_filter_bins)
 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+			 MAC_1000FD | MAC_2500FD | MAC_5000FD |
+			 MAC_10000FD | MAC_25000FD |
+			 MAC_40000FD | MAC_50000FD |
+			 MAC_100000FD;
 	mac->link.duplex = 0;
 	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
 	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
@@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
 	return ret;
 }
 
-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
-{
-	/* Half-Duplex can only work with single tx queue */
-	if (priv->plat->tx_queues_to_use > 1)
-		priv->phylink_config.mac_capabilities &=
-			~(MAC_10HD | MAC_100HD | MAC_1000HD);
-	else
-		priv->phylink_config.mac_capabilities |=
-			(MAC_10HD | MAC_100HD | MAC_1000HD);
-}
-
 static int stmmac_phy_setup(struct stmmac_priv *priv)
 {
 	struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
 		xpcs_get_interfaces(priv->hw->xpcs,
 				    priv->phylink_config.supported_interfaces);
 
-	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
-						MAC_10FD | MAC_100FD |
-						MAC_1000FD;
-
-	stmmac_set_half_duplex(priv);
-
 	/* Get the MAC specific capabilities */
 	stmmac_mac_phylink_get_caps(priv);
 
+	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
 	max_speed = priv->plat->max_speed;
 	if (max_speed)
 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
@@ -7195,6 +7180,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret = 0, i;
+	int max_speed;
 
 	if (netif_running(dev))
 		stmmac_release(dev);
@@ -7208,7 +7194,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
 									rx_cnt);
 
-	stmmac_set_half_duplex(priv);
+	stmmac_mac_phylink_get_caps(priv);
+
+	priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
+	max_speed = priv->plat->max_speed;
+	if (max_speed)
+		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
 	stmmac_napi_add(dev);
 
 	if (netif_running(dev))
@@ -2716,6 +2716,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
 
 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 {
+	struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+	struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
 	struct device *dev = common->dev;
 	struct am65_cpsw_port *port;
 	int ret = 0, i;
@@ -2728,6 +2730,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 	if (ret)
 		return ret;
 
+	/* The DMA Channels are not guaranteed to be in a clean state.
+	 * Reset and disable them to ensure that they are back to the
+	 * clean state and ready to be used.
+	 */
+	for (i = 0; i < common->tx_ch_num; i++) {
+		k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+					  am65_cpsw_nuss_tx_cleanup);
+		k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+	}
+
+	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+		k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
+
+	k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
 	ret = am65_cpsw_nuss_register_devlink(common);
 	if (ret)
 		return ret;
@@ -2132,14 +2132,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 					    tun_is_little_endian(tun), true,
 					    vlan_hlen)) {
 			struct skb_shared_info *sinfo = skb_shinfo(skb);
-			pr_err("unexpected GSO type: "
-			       "0x%x, gso_size %d, hdr_len %d\n",
-			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
-			       tun16_to_cpu(tun, gso.hdr_len));
-			print_hex_dump(KERN_ERR, "tun: ",
-				       DUMP_PREFIX_NONE,
-				       16, 1, skb->head,
-				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+			if (net_ratelimit()) {
+				netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+					   sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+					   tun16_to_cpu(tun, gso.hdr_len));
+				print_hex_dump(KERN_ERR, "tun: ",
+					       DUMP_PREFIX_NONE,
+					       16, 1, skb->head,
+					       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+			}
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -1317,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
 
 	netif_set_tso_max_size(dev->net, 16384);
 
+	ax88179_reset(dev);
+
 	return 0;
 }
 
@@ -1695,7 +1697,6 @@ static const struct driver_info ax88179_info = {
 	.unbind = ax88179_unbind,
 	.status = ax88179_status,
 	.link_reset = ax88179_link_reset,
-	.reset = ax88179_reset,
 	.stop = ax88179_stop,
 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
 	.rx_fixup = ax88179_rx_fixup,
@@ -1708,7 +1709,6 @@ static const struct driver_info ax88178a_info = {
 	.unbind = ax88179_unbind,
 	.status = ax88179_status,
 	.link_reset = ax88179_link_reset,
-	.reset = ax88179_reset,
 	.stop = ax88179_stop,
 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
 	.rx_fixup = ax88179_rx_fixup,
@@ -3570,19 +3570,34 @@ static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfu
 static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	bool update = false;
 	int i;
 
 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
 
 	if (indir) {
+		if (!vi->has_rss)
+			return -EOPNOTSUPP;
+
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
 			vi->ctrl->rss.indirection_table[i] = indir[i];
+		update = true;
 	}
-	if (key)
-		memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
 
-	virtnet_commit_rss_command(vi);
+	if (key) {
+		/* If either _F_HASH_REPORT or _F_RSS are negotiated, the
+		 * device provides hash calculation capabilities, that is,
+		 * hash_key is configured.
+		 */
+		if (!vi->has_rss && !vi->has_rss_hash_report)
+			return -EOPNOTSUPP;
+
+		memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
+		update = true;
+	}
+
+	if (update)
+		virtnet_commit_rss_command(vi);
 
 	return 0;
 }
@@ -4491,13 +4506,15 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
 		vi->has_rss_hash_report = true;
 
-	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
 		vi->has_rss = true;
 
-	if (vi->has_rss || vi->has_rss_hash_report) {
 		vi->rss_indir_table_size =
 			virtio_cread16(vdev, offsetof(struct virtio_net_config,
 				rss_max_indirection_table_length));
+	}
+
+	if (vi->has_rss || vi->has_rss_hash_report) {
 		vi->rss_key_size =
 			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
 
@@ -386,21 +386,8 @@ void pci_bus_add_devices(const struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_bus_add_devices);
 
-/** pci_walk_bus - walk devices on/under bus, calling callback.
- *  @top      bus whose devices should be walked
- *  @cb       callback to be called for each device found
- *  @userdata arbitrary pointer to be passed to callback.
- *
- *  Walk the given bus, including any bridged devices
- *  on buses under this bus.  Call the provided callback
- *  on each device found.
- *
- *  We check the return of @cb each time. If it returns anything
- *  other than 0, we break out.
- *
- */
-void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
-		  void *userdata)
+static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+			   void *userdata, bool locked)
 {
 	struct pci_dev *dev;
 	struct pci_bus *bus;
@@ -408,7 +395,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
 	int retval;
 
 	bus = top;
-	down_read(&pci_bus_sem);
+	if (!locked)
+		down_read(&pci_bus_sem);
 	next = top->devices.next;
 	for (;;) {
 		if (next == &bus->devices) {
@@ -431,10 +419,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
 		if (retval)
 			break;
 	}
-	up_read(&pci_bus_sem);
+	if (!locked)
+		up_read(&pci_bus_sem);
+}
+
+/**
+ * pci_walk_bus - walk devices on/under bus, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Walk the given bus, including any bridged devices
+ * on buses under this bus.  Call the provided callback
+ * on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ */
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+	__pci_walk_bus(top, cb, userdata, false);
 }
 EXPORT_SYMBOL_GPL(pci_walk_bus);
 
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+	lockdep_assert_held(&pci_bus_sem);
+
+	__pci_walk_bus(top, cb, userdata, true);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_locked);
+
 struct pci_bus *pci_bus_get(struct pci_bus *bus)
 {
 	if (bus)
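For context, pci_walk_bus() visits every device at or below a bus, stops as soon as the callback returns non-zero, and takes pci_bus_sem internally, while pci_walk_bus_locked() is the variant for callers that already hold the semaphore. A minimal sketch of a caller follows; the counting callback and wrapper are illustrative only, and just the walk-helper signatures come from the hunk above.

#include <linux/pci.h>

/* Illustrative callback: count every device below the given bus.
 * Returning non-zero from the callback would stop the walk early.
 */
static int count_pci_dev(struct pci_dev *pdev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static unsigned int count_devices_below(struct pci_bus *bus)
{
	unsigned int count = 0;

	/* Takes pci_bus_sem internally; a caller that already holds the
	 * semaphore would use pci_walk_bus_locked() instead.
	 */
	pci_walk_bus(bus, count_pci_dev, &count);
	return count;
}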
@@ -1303,6 +1303,7 @@ end:
 /**
  * pci_set_full_power_state - Put a PCI device into D0 and update its state
  * @dev: PCI device to power up
+ * @locked: whether pci_bus_sem is held
  *
  * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
  * to confirm the state change, restore its BARs if they might be lost and
@@ -1312,7 +1313,7 @@ end:
  * to D0, it is more efficient to use pci_power_up() directly instead of this
  * function.
  */
-static int pci_set_full_power_state(struct pci_dev *dev)
+static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
 {
 	u16 pmcsr;
 	int ret;
@@ -1348,7 +1349,7 @@ static int pci_set_full_power_state(struct pci_dev *dev)
 	}
 
 	if (dev->bus->self)
-		pcie_aspm_pm_state_change(dev->bus->self);
+		pcie_aspm_pm_state_change(dev->bus->self, locked);
 
 	return 0;
 }
@@ -1377,10 +1378,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
 }
 
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
+{
+	if (!bus)
+		return;
+
+	if (locked)
+		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
+	else
+		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+}
+
 /**
  * pci_set_low_power_state - Put a PCI device into a low-power state.
  * @dev: PCI device to handle.
  * @state: PCI power state (D1, D2, D3hot) to put the device into.
+ * @locked: whether pci_bus_sem is held
  *
  * Use the device's PCI_PM_CTRL register to put it into a low-power state.
  *
@@ -1391,7 +1404,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
  * 0 if device already is in the requested state.
  * 0 if device's power state has been successfully changed.
  */
-static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
 {
 	u16 pmcsr;
 
@@ -1445,29 +1458,12 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
 		 pci_power_name(state));
 
 	if (dev->bus->self)
-		pcie_aspm_pm_state_change(dev->bus->self);
+		pcie_aspm_pm_state_change(dev->bus->self, locked);
 
 	return 0;
 }
 
-/**
- * pci_set_power_state - Set the power state of a PCI device
- * @dev: PCI device to handle.
- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- *
- * Transition a device to a new power state, using the platform firmware and/or
- * the device's PCI PM registers.
- *
- * RETURN VALUE:
- * -EINVAL if the requested state is invalid.
- * -EIO if device does not support PCI PM or its PM capabilities register has a
- * wrong version, or device doesn't support the requested state.
- * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
- * 0 if device already is in the requested state.
- * 0 if the transition is to D3 but D3 is not supported.
- * 0 if device's power state has been successfully changed.
- */
-int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
 {
 	int error;
 
@@ -1491,7 +1487,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 		return 0;
 
 	if (state == PCI_D0)
-		return pci_set_full_power_state(dev);
+		return pci_set_full_power_state(dev, locked);
 
 	/*
 	 * This device is quirked not to be put into D3, so don't put it in
@@ -1505,16 +1501,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 		 * To put the device in D3cold, put it into D3hot in the native
 		 * way, then put it into D3cold using platform ops.
 		 */
-		error = pci_set_low_power_state(dev, PCI_D3hot);
+		error = pci_set_low_power_state(dev, PCI_D3hot, locked);
 
 		if (pci_platform_power_transition(dev, PCI_D3cold))
 			return error;
 
 		/* Powering off a bridge may power off the whole hierarchy */
 		if (dev->current_state == PCI_D3cold)
-			pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
 	} else {
-		error = pci_set_low_power_state(dev, state);
+		error = pci_set_low_power_state(dev, state, locked);
 
 		if (pci_platform_power_transition(dev, state))
 			return error;
@@ -1522,8 +1518,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 
 	return 0;
 }
+
+/**
+ * pci_set_power_state - Set the power state of a PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * Transition a device to a new power state, using the platform firmware and/or
+ * the device's PCI PM registers.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
+ * 0 if device already is in the requested state.
+ * 0 if the transition is to D3 but D3 is not supported.
+ * 0 if device's power state has been successfully changed.
+ */
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+	return __pci_set_power_state(dev, state, false);
+}
 EXPORT_SYMBOL(pci_set_power_state);
 
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{
+	lockdep_assert_held(&pci_bus_sem);
+
+	return __pci_set_power_state(dev, state, true);
+}
+EXPORT_SYMBOL(pci_set_power_state_locked);
+
 #define PCI_EXP_SAVE_REGS	7
 
 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
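With the power-management paths converted to __pci_set_power_state(dev, state, locked), pci_set_power_state() keeps its documented behaviour, while the newly exported pci_set_power_state_locked() is meant for contexts that already hold pci_bus_sem, most notably callbacks run by pci_walk_bus(), which invokes its callback with the semaphore held for reading. A rough sketch of that use, with hypothetical callback and caller names:

#include <linux/pci.h>

/* Hypothetical pci_walk_bus() callback: the walk already holds
 * pci_bus_sem for reading, so the _locked variant is required here. */
static int resume_to_d0(struct pci_dev *dev, void *data)
{
        return pci_set_power_state_locked(dev, PCI_D0);
}

static void resume_bus_devices(struct pci_bus *bus)
{
        pci_walk_bus(bus, resume_to_d0, NULL);
}

A callback that called plain pci_set_power_state() instead would end up re-acquiring pci_bus_sem further down, in the ASPM and bus-walk paths shown in the hunks above.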
@@ -570,12 +570,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
 #ifdef CONFIG_PCIEASPM
 void pcie_aspm_init_link_state(struct pci_dev *pdev);
 void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
 void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
 static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
+static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
 static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
 #endif
 
@@ -689,10 +689,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
 	 * in pcie_config_aspm_link().
 	 */
 	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
-		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
-						   PCI_EXP_LNKCTL_ASPM_L1, 0);
-		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
-						   PCI_EXP_LNKCTL_ASPM_L1, 0);
+		pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
+					   PCI_EXP_LNKCTL_ASPM_L1);
+		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+					   PCI_EXP_LNKCTL_ASPM_L1);
 	}
 
 	val = 0;
@@ -1001,8 +1001,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
 	up_read(&pci_bus_sem);
 }
 
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+/*
+ * @pdev: the root port or switch downstream port
+ * @locked: whether pci_bus_sem is held
+ */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
 {
 	struct pcie_link_state *link = pdev->link_state;
 
@@ -1012,12 +1015,14 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
 	 * Devices changed PM state, we should recheck if latency
 	 * meets all functions' requirement
 	 */
-	down_read(&pci_bus_sem);
+	if (!locked)
+		down_read(&pci_bus_sem);
 	mutex_lock(&aspm_lock);
 	pcie_update_aspm_capable(link->root);
 	pcie_config_aspm_path(link);
 	mutex_unlock(&aspm_lock);
-	up_read(&pci_bus_sem);
+	if (!locked)
+		up_read(&pci_bus_sem);
 }
 
 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
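pcie_capability_clear_word() is simply pcie_capability_clear_and_set_word() with an empty set mask, so the substitutions in the L1 disable path above do not change behaviour: both forms perform a read-modify-write of the Link Control register that clears the ASPM L1 enable bit. A small illustration, assuming only the standard helpers from <linux/pci.h>:

#include <linux/pci.h>

/* Old and new spellings of the same read-modify-write of Link Control;
 * either one clears the ASPM L1 enable bit. */
static void disable_aspm_l1_old(struct pci_dev *dev)
{
        pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
                                           PCI_EXP_LNKCTL_ASPM_L1, 0);
}

static void disable_aspm_l1_new(struct pci_dev *dev)
{
        pcie_capability_clear_word(dev, PCI_EXP_LNKCTL,
                                   PCI_EXP_LNKCTL_ASPM_L1);
}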
@@ -9,6 +9,7 @@
 #define dev_fmt(fmt) "DPC: " fmt
 
 #include <linux/aer.h>
+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
@@ -202,7 +203,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
 
 	/* Get First Error Pointer */
 	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
-	first_error = (dpc_status & 0x1f00) >> 8;
+	first_error = FIELD_GET(PCI_EXP_DPC_RP_PIO_FEP, dpc_status);
 
 	for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
 		if ((status & ~mask) & (1 << i))
@@ -338,7 +339,7 @@ void pci_dpc_init(struct pci_dev *pdev)
 	/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
 	if (!pdev->dpc_rp_log_size) {
 		pdev->dpc_rp_log_size =
-			(cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
+			FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
 		if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
 			pci_err(pdev, "RP PIO log size %u is invalid\n",
 				pdev->dpc_rp_log_size);
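FIELD_GET() from the newly included <linux/bitfield.h> masks a register value with a compile-time constant and shifts the result down by the mask's lowest set bit, so the DPC conversions above are mechanical as long as PCI_EXP_DPC_RP_PIO_FEP and PCI_EXP_DPC_RP_PIO_LOG_SIZE cover the same bits as the old open-coded shifts (bits 12:8 for the First Error Pointer, and a log-size field starting at bit 8). A small sketch of the pattern, using an explicit GENMASK() rather than the register #defines:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* With a constant mask, FIELD_GET() masks and shifts in one step:
 * FIELD_GET(GENMASK(12, 8), status) == (status & 0x1f00) >> 8. */
static u16 first_error_pointer(u16 dpc_status)
{
        return FIELD_GET(GENMASK(12, 8), dpc_status);
}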
@@ -4571,9 +4571,9 @@ static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
 
 	pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
 		 dev_name(&pdev->dev));
-	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
-					   PCI_EXP_DEVCTL_RELAX_EN |
-					   PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+	pcie_capability_clear_word(root_port, PCI_EXP_DEVCTL,
+				   PCI_EXP_DEVCTL_RELAX_EN |
+				   PCI_EXP_DEVCTL_NOSNOOP_EN);
 }
 
 /*
@@ -6205,7 +6205,7 @@ static void dpc_log_size(struct pci_dev *dev)
 	if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
 		return;
 
-	if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) {
+	if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
 		pci_info(dev, "Overriding RP PIO Log Size to 4\n");
 		dev->dpc_rp_log_size = 4;
 	}
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
 			DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
 		}
 	},
+	{
+		.ident = "Framework Laptop 13 (Phoenix)",
+		.driver_data = &quirk_spurious_8042,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+			DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+		}
+	},
 	{}
 };
 
@@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
 
 	spin_lock_irq(cdev->ccwlock);
 	ret = ccw_device_online(cdev);
-	spin_unlock_irq(cdev->ccwlock);
-	if (ret == 0)
-		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
-	else {
+	if (ret) {
+		spin_unlock_irq(cdev->ccwlock);
 		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
 			      "device 0.%x.%04x\n",
 			      ret, cdev->private->dev_id.ssid,
@@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
 		put_device(&cdev->dev);
 		return ret;
 	}
-	spin_lock_irq(cdev->ccwlock);
+	/* Wait until a final state is reached */
+	while (!dev_fsm_final_state(cdev)) {
+		spin_unlock_irq(cdev->ccwlock);
+		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+		spin_lock_irq(cdev->ccwlock);
+	}
 	/* Check if online processing was successful */
 	if ((cdev->private->state != DEV_STATE_ONLINE) &&
 	    (cdev->private->state != DEV_STATE_W4SENSE)) {
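The reworked ccw_device_set_online() path above only drops cdev->ccwlock around the actual sleep and re-checks the FSM state with the lock held, so the ONLINE/W4SENSE test that follows cannot race with a concurrent state change. A generic sketch of that wait pattern, with all names illustrative rather than taken from the driver:

#include <linux/spinlock.h>
#include <linux/wait.h>

/* Sleep with the lock dropped, but always re-check the condition under
 * the lock before acting on the final state. */
static void wait_for_final_state(spinlock_t *lock, wait_queue_head_t *wq,
                                 bool (*final_state)(void))
{
        spin_lock_irq(lock);
        while (!final_state()) {
                spin_unlock_irq(lock);
                wait_event(*wq, final_state());
                spin_lock_irq(lock);
        }
        /* the caller's final-state checks would go here, still locked */
        spin_unlock_irq(lock);
}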
@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
 		lgr_info_log();
 }
 
-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
-				      int dstat)
+static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+				     int dstat, int dcc)
 {
 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
 
@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
 		goto error;
 	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
 		goto error;
+	if (dcc == 1)
+		return -EAGAIN;
 	if (!(dstat & DEV_STAT_DEV_END))
 		goto error;
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
-	return;
+	return 0;
 
 error:
 	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
 	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+	return -EIO;
 }
 
 /* qdio interrupt handler */
@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 {
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct subchannel_id schid;
-	int cstat, dstat;
+	int cstat, dstat, rc, dcc;
 
 	if (!intparm || !irq_ptr) {
 		ccw_device_get_schid(cdev, &schid);
@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	qdio_irq_check_sense(irq_ptr, irb);
 	cstat = irb->scsw.cmd.cstat;
 	dstat = irb->scsw.cmd.dstat;
+	dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
+	rc = 0;
 
 	switch (irq_ptr->state) {
 	case QDIO_IRQ_STATE_INACTIVE:
-		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+		rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
 		break;
 	case QDIO_IRQ_STATE_CLEANUP:
 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		if (cstat || dstat)
 			qdio_handle_activate_check(irq_ptr, intparm, cstat,
 						   dstat);
+		else if (dcc == 1)
+			rc = -EAGAIN;
 		break;
 	case QDIO_IRQ_STATE_STOPPED:
 		break;
 	default:
 		WARN_ON_ONCE(1);
 	}
+
+	if (rc == -EAGAIN) {
+		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
+		rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
+		if (!rc)
+			return;
+		DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%4x", rc);
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+	}
+
 	wake_up(&cdev->private->wait_q);
 }
 
@@ -291,13 +291,16 @@ out:
 static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
 	clear_bit(dmb->sba_idx, ism->sba_bitmap);
-	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
-			  dmb->cpu_addr, dmb->dma_addr);
+	dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
+		       DMA_FROM_DEVICE);
+	folio_put(virt_to_folio(dmb->cpu_addr));
 }
 
 static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
+	struct folio *folio;
 	unsigned long bit;
+	int rc;
 
 	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
 		return -EINVAL;
@@ -314,14 +317,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
 		return -EINVAL;
 
-	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
-					   &dmb->dma_addr,
-					   GFP_KERNEL | __GFP_NOWARN |
-					   __GFP_NOMEMALLOC | __GFP_NORETRY);
-	if (!dmb->cpu_addr)
-		clear_bit(dmb->sba_idx, ism->sba_bitmap);
+	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+			    __GFP_NORETRY, get_order(dmb->dmb_len));
 
-	return dmb->cpu_addr ? 0 : -ENOMEM;
+	if (!folio) {
+		rc = -ENOMEM;
+		goto out_bit;
+	}
+
+	dmb->cpu_addr = folio_address(folio);
+	dmb->dma_addr = dma_map_page(&ism->pdev->dev,
+				     virt_to_page(dmb->cpu_addr), 0,
+				     dmb->dmb_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	kfree(dmb->cpu_addr);
+out_bit:
+	clear_bit(dmb->sba_idx, ism->sba_bitmap);
+	return rc;
 }
 
 int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
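The ism hunks replace a single large dma_alloc_coherent() buffer with pages obtained from the normal page allocator that are then streaming-mapped for device writes; teardown reverses the two steps. A stripped-down sketch of that allocate/map/unmap/free lifecycle, assuming a generic struct device (the function names here are illustrative, not the driver's API):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate pages, then streaming-map them for device writes. */
static void *example_alloc_rx_buf(struct device *dev, size_t len,
                                  dma_addr_t *dma)
{
        struct folio *folio;
        void *cpu;

        folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN, get_order(len));
        if (!folio)
                return NULL;

        cpu = folio_address(folio);
        *dma = dma_map_page(dev, virt_to_page(cpu), 0, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                folio_put(folio);
                return NULL;
        }
        return cpu;
}

/* Tear down in reverse order: unmap first, then release the pages. */
static void example_free_rx_buf(struct device *dev, void *cpu,
                                dma_addr_t dma, size_t len)
{
        dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
        folio_put(virt_to_folio(cpu));
}

The sketch keeps only the essential calls; the real hunks above also preserve the original __GFP_NOMEMALLOC | __GFP_NORETRY flags and the sba bitmap bookkeeping.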
Some files were not shown because too many files have changed in this diff.