Merge linux 6.6.48

Conflicts:
	fs/nfsd/nfssvc.c

Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
Jianping Liu 2024-09-29 16:01:50 +08:00
commit 60113f62b0
389 changed files with 3843 additions and 1973 deletions


@@ -565,7 +565,8 @@ Description:	Control Symmetric Multi Threading (SMT)
 ================ =========================================
 If control status is "forceoff" or "notsupported" writes
-are rejected.
+are rejected. Note that enabling SMT on PowerPC skips
+offline cores.

 What:		/sys/devices/system/cpu/cpuX/power/energy_perf_bias
 Date:		March 2019


@@ -8,7 +8,7 @@ else
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 47
+SUBLEVEL = 48
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth


@@ -929,7 +929,7 @@ const struct dma_map_ops alpha_pci_ops = {
	.dma_supported		= alpha_pci_supported,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
-	.alloc_pages		= dma_common_alloc_pages,
+	.alloc_pages_op		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
 };
 EXPORT_SYMBOL(alpha_pci_ops);


@@ -27,7 +27,7 @@
 #include <asm/numa.h>

-static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
+static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

 int __init acpi_numa_get_nid(unsigned int cpu)
 {
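
An editorial note on this one-liner: in C, `{ NUMA_NO_NODE }` initializes only element 0 and zero-fills the rest, so with NUMA_NO_NODE defined as -1 every other CPU silently claimed node 0. The `[0 ... N-1]` range designator (a GNU C extension the kernel relies on) fills the whole array. A minimal user-space sketch of the difference, with illustrative names:

    #include <stdio.h>

    #define NR_CPUS 4
    #define NUMA_NO_NODE (-1)

    /* Only element 0 becomes NUMA_NO_NODE; elements 1..3 are zero-filled. */
    static int broken_map[NR_CPUS] = { NUMA_NO_NODE };

    /* GNU C range designator: every element becomes NUMA_NO_NODE. */
    static int fixed_map[NR_CPUS] = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

    int main(void)
    {
            for (int i = 0; i < NR_CPUS; i++)
                    printf("cpu%d: broken=%d fixed=%d\n",
                           i, broken_map[i], fixed_map[i]);
            /* broken_map prints 0 ("node 0") for cpu1..cpu3 instead of -1. */
            return 0;
    }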


@@ -371,9 +371,6 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
	smp_init_cpus();
	smp_build_mpidr_hash();

-	/* Init percpu seeds for random tags after cpus are set up. */
-	kasan_init_sw_tags();
-
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation


@@ -460,6 +460,8 @@ void __init smp_prepare_boot_cpu(void)
	init_gic_priority_masking();

	kasan_init_hw_tags();
+	/* Init percpu seeds for random tags after cpus are set up. */
+	kasan_init_sw_tags();
 }

 /*


@@ -32,6 +32,7 @@
 #include <trace/events/kvm.h>

 #include "sys_regs.h"
+#include "vgic/vgic.h"

 #include "trace.h"

@@ -301,6 +302,11 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 {
	bool g1;

+	if (!kvm_has_gicv3(vcpu->kvm)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);


@@ -343,4 +343,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm);
 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
 int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);

+static inline bool kvm_has_gicv3(struct kvm *kvm)
+{
+	return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
+		irqchip_in_kernel(kvm) &&
+		kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
+}
+
 #endif


@@ -617,7 +617,7 @@ const struct dma_map_ops jazz_dma_ops = {
	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
-	.alloc_pages		= dma_common_alloc_pages,
+	.alloc_pages_op		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
 };
 EXPORT_SYMBOL(jazz_dma_ops);


@@ -1725,12 +1725,16 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
			MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
		c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
+		change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
+				  LOONGSON_CONF6_INTIMER);
		break;
	case PRID_IMP_LOONGSON_64G:
		__cpu_name[cpu] = "ICT Loongson-3";
		set_elf_platform(cpu, "loongson3a");
		set_isa(c, MIPS_CPU_ISA_M64R2);
		decode_cpucfg(c);
+		change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
+				  LOONGSON_CONF6_INTIMER);
		break;
	default:
		panic("Unknown Loongson Processor ID!");


@@ -255,6 +255,9 @@ void calibrate_delay(void)

 void __init setup_arch(char **cmdline_p)
 {
+	/* setup memblock allocator */
+	setup_memory();
+
	unflatten_and_copy_device_tree();

	setup_cpuinfo();

@@ -278,9 +281,6 @@ void __init setup_arch(char **cmdline_p)
	}
 #endif

-	/* setup memblock allocator */
-	setup_memory();
-
	/* paging_init() sets up the MMU and marks all pages as reserved */
	paging_init();


@@ -498,7 +498,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
	old_regs = set_irq_regs(regs);
	local_irq_disable();
-	irq_enter();
+	irq_enter_rcu();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)

@@ -533,7 +533,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
 #endif /* CONFIG_IRQSTACKS */

 out:
-	irq_exit();
+	irq_exit_rcu();
	set_irq_regs(old_regs);
	return;


@@ -112,8 +112,11 @@ static void *simple_realloc(void *ptr, unsigned long size)
		return ptr;

	new = simple_malloc(size);
-	memcpy(new, ptr, p->size);
-	simple_free(ptr);
+	if (new) {
+		memcpy(new, ptr, p->size);
+		simple_free(ptr);
+	}
	return new;
 }
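
Worth spelling out the bug being fixed here: the old code memcpy'd into the result of simple_malloc() unconditionally, turning an allocation failure into a write through NULL. A hedged user-space sketch of the same guard, outside the kernel:

    #include <stdlib.h>
    #include <string.h>

    /* Grow a buffer; on allocation failure return NULL and leave `old` usable. */
    static void *grow_copy(void *old, size_t old_size, size_t new_size)
    {
            void *new = malloc(new_size);

            if (new) {              /* only copy and free when the allocation worked */
                    memcpy(new, old, old_size);
                    free(old);
            }
            return new;             /* NULL signals failure, as with realloc() */
    }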


@@ -145,6 +145,7 @@ static inline int cpu_to_coregroup_id(int cpu)

 #ifdef CONFIG_HOTPLUG_SMT
 #include <linux/cpu_smt.h>
+#include <linux/cpumask.h>
 #include <asm/cputhreads.h>

 static inline bool topology_is_primary_thread(unsigned int cpu)

@@ -156,6 +157,18 @@ static inline bool topology_smt_thread_allowed(unsigned int cpu)
 {
	return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
 }
+
+#define topology_is_core_online topology_is_core_online
+static inline bool topology_is_core_online(unsigned int cpu)
+{
+	int i, first_cpu = cpu_first_thread_sibling(cpu);
+
+	for (i = first_cpu; i < first_cpu + threads_per_core; ++i) {
+		if (cpu_online(i))
+			return true;
+	}
+	return false;
+}
 #endif

 #endif /* __KERNEL__ */


@@ -216,6 +216,6 @@ const struct dma_map_ops dma_iommu_ops = {
	.get_required_mask	= dma_iommu_get_required_mask,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
-	.alloc_pages		= dma_common_alloc_pages,
+	.alloc_pages_op		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
 };


@@ -695,7 +695,7 @@ static const struct dma_map_ops ps3_sb_dma_ops = {
	.unmap_page		= ps3_unmap_page,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
-	.alloc_pages		= dma_common_alloc_pages,
+	.alloc_pages_op		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
 };

@@ -709,7 +709,7 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = {
	.unmap_page		= ps3_unmap_page,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
-	.alloc_pages		= dma_common_alloc_pages,
+	.alloc_pages_op		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
 };


@@ -23,6 +23,46 @@ void papr_sysparm_buf_free(struct papr_sysparm_buf *buf)
	kfree(buf);
 }

+static size_t papr_sysparm_buf_get_length(const struct papr_sysparm_buf *buf)
+{
+	return be16_to_cpu(buf->len);
+}
+
+static void papr_sysparm_buf_set_length(struct papr_sysparm_buf *buf, size_t length)
+{
+	WARN_ONCE(length > sizeof(buf->val),
+		  "bogus length %zu, clamping to safe value", length);
+	length = min(sizeof(buf->val), length);
+	buf->len = cpu_to_be16(length);
+}
+
+/*
+ * For use on buffers returned from ibm,get-system-parameter before
+ * returning them to callers. Ensures the encoded length of valid data
+ * cannot overrun buf->val[].
+ */
+static void papr_sysparm_buf_clamp_length(struct papr_sysparm_buf *buf)
+{
+	papr_sysparm_buf_set_length(buf, papr_sysparm_buf_get_length(buf));
+}
+
+/*
+ * Perform some basic diligence on the system parameter buffer before
+ * submitting it to RTAS.
+ */
+static bool papr_sysparm_buf_can_submit(const struct papr_sysparm_buf *buf)
+{
+	/*
+	 * Firmware ought to reject buffer lengths that exceed the
+	 * maximum specified in PAPR, but there's no reason for the
+	 * kernel to allow them either.
+	 */
+	if (papr_sysparm_buf_get_length(buf) > sizeof(buf->val))
+		return false;
+
+	return true;
+}
+
 /**
  * papr_sysparm_get() - Retrieve the value of a PAPR system parameter.
  * @param: PAPR system parameter token as described in

@@ -63,6 +103,9 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

+	if (!papr_sysparm_buf_can_submit(buf))
+		return -EINVAL;
+
	work_area = rtas_work_area_alloc(sizeof(*buf));

	memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));

@@ -77,6 +120,7 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
	case 0:
		ret = 0;
		memcpy(buf, rtas_work_area_raw_buf(work_area), sizeof(*buf));
+		papr_sysparm_buf_clamp_length(buf);
		break;
	case -3: /* parameter not implemented */
		ret = -EOPNOTSUPP;

@@ -115,6 +159,9 @@ int papr_sysparm_set(papr_sysparm_t param, const struct papr_sysparm_buf *buf)
	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

+	if (!papr_sysparm_buf_can_submit(buf))
+		return -EINVAL;
+
	work_area = rtas_work_area_alloc(sizeof(*buf));

	memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));
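
A note on why the read path clamps instead of rejecting: firmware writes the big-endian buf->len, and a corrupt value would otherwise let callers index past buf->val[]. A self-contained sketch of the same clamp idiom with illustrative types (not the kernel's structures):

    #include <stddef.h>
    #include <stdint.h>

    #define VAL_MAX 16

    struct param_buf {
            uint8_t len_be[2];  /* 16-bit big-endian length, filled in by firmware */
            char    val[VAL_MAX];
    };

    static size_t buf_get_length(const struct param_buf *b)
    {
            return ((size_t)b->len_be[0] << 8) | b->len_be[1];  /* be16_to_cpu */
    }

    static void buf_set_length(struct param_buf *b, size_t len)
    {
            if (len > VAL_MAX)  /* clamp: the length must never overrun val[] */
                    len = VAL_MAX;
            b->len_be[0] = (uint8_t)(len >> 8);                 /* cpu_to_be16 */
            b->len_be[1] = (uint8_t)len;
    }

    /* Run on buffers returned by firmware, before handing them to callers. */
    static void buf_clamp_length(struct param_buf *b)
    {
            buf_set_length(b, buf_get_length(b));
    }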


@@ -611,7 +611,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
	.get_required_mask	= dma_iommu_get_required_mask,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
-	.alloc_pages		= dma_common_alloc_pages,
+	.alloc_pages_op		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
 };


@@ -236,6 +236,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
			  cpu, hw_id);
+	if (!rname)
+		return -ENOMEM;
	if (!request_mem_region(addr, size, rname)) {
		pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
			cpu, hw_id);


@@ -164,6 +164,16 @@
	REG_L x31, PT_T6(sp)
	.endm

+/* Annotate a function as being unsuitable for kprobes. */
+#ifdef CONFIG_KPROBES
+#define ASM_NOKPROBE(name)				\
+	.pushsection "_kprobe_blacklist", "aw";		\
+	RISCV_PTR name;					\
+	.popsection
+#else
+#define ASM_NOKPROBE(name)
+#endif
+
 #endif /* __ASSEMBLY__ */

 #endif /* _ASM_RISCV_ASM_H */


@@ -105,6 +105,7 @@ _save_context:
 1:
	tail do_trap_unknown
 SYM_CODE_END(handle_exception)
+ASM_NOKPROBE(handle_exception)

 /*
  * The ret_from_exception must be called with interrupt disabled. Here is the

@@ -171,6 +172,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
	sret
 #endif
 SYM_CODE_END(ret_from_exception)
+ASM_NOKPROBE(ret_from_exception)

 #ifdef CONFIG_VMAP_STACK
 SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)

@@ -206,6 +208,7 @@ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	move a0, sp
	tail handle_bad_stack
 SYM_CODE_END(handle_kernel_stack_overflow)
+ASM_NOKPROBE(handle_kernel_stack_overflow)
 #endif

 SYM_CODE_START(ret_from_fork)


@@ -311,6 +311,7 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)

		regs->epc += 4;
		regs->orig_a0 = regs->a0;
+		regs->a0 = -ENOSYS;

		riscv_v_vstate_discard(regs);

@@ -318,8 +319,6 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)

		if (syscall >= 0 && syscall < NR_syscalls)
			syscall_handler(regs, syscall);
-		else if (syscall != -1)
-			regs->a0 = -ENOSYS;

		syscall_exit_to_user_mode(regs);
	} else {


@@ -912,7 +912,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
			   PMD_SIZE, PAGE_KERNEL_EXEC);

	/* Map the data in RAM */
-	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
+	end_va = kernel_map.virt_addr + kernel_map.size;
	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),

@@ -1081,7 +1081,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)

	phys_ram_base = CONFIG_PHYS_RAM_BASE;
	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
-	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
+	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);

	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
 #else


@@ -442,7 +442,10 @@ static inline int share(unsigned long addr, u16 cmd)

	if (!uv_call(0, (u64)&uvcb))
		return 0;
-	return -EINVAL;
+	pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
+	       uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
+	       uvcb.header.rc, uvcb.header.rrc);
+	panic("System security cannot be guaranteed unless the system panics now.\n");
 }

 /*


@@ -258,15 +258,9 @@ static inline void save_vector_registers(void)
 #endif
 }

-static inline void setup_control_registers(void)
+static inline void setup_low_address_protection(void)
 {
-	unsigned long reg;
-
-	__ctl_store(reg, 0, 0);
-	reg |= CR0_LOW_ADDRESS_PROTECTION;
-	reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
-	reg |= CR0_EXTERNAL_CALL_SUBMASK;
-	__ctl_load(reg, 0, 0);
+	__ctl_set_bit(0, 28);
 }

 static inline void setup_access_registers(void)

@@ -314,7 +308,7 @@ void __init startup_init(void)
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
-	setup_control_registers();
+	setup_low_address_protection();
	setup_access_registers();
	lockdep_on();
 }


@@ -1013,12 +1013,12 @@ void __init smp_fill_possible_mask(void)

 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
-	/* request the 0x1202 external call external interrupt */
+	ctl_set_bit(0, 14);
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
+	ctl_set_bit(0, 13);
 }

 void __init smp_prepare_boot_cpu(void)


@@ -249,7 +249,12 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)

 static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
 {
-	u32 gd = virt_to_phys(kvm->arch.gisa_int.origin);
+	u32 gd;
+
+	if (!kvm->arch.gisa_int.origin)
+		return 0;
+
+	gd = virt_to_phys(kvm->arch.gisa_int.origin);

	if (gd && sclp.has_gisaf)
		gd |= GISA_FORMAT1;


@@ -676,7 +676,7 @@ static const struct dma_map_ops gart_dma_ops = {
	.get_sgtable			= dma_common_get_sgtable,
	.dma_supported			= dma_direct_supported,
	.get_required_mask		= dma_direct_get_required_mask,
-	.alloc_pages			= dma_direct_alloc_pages,
+	.alloc_pages_op			= dma_direct_alloc_pages,
	.free_pages			= dma_direct_free_pages,
 };


@@ -1030,7 +1030,10 @@ unsigned long arch_align_stack(unsigned long sp)

 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	return randomize_page(mm->brk, 0x02000000);
+	if (mmap_is_ia32())
+		return randomize_page(mm->brk, SZ_32M);
+
+	return randomize_page(mm->brk, SZ_1G);
 }

 /*


@@ -38,6 +38,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
 void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
	unsigned int users;
+	unsigned long flags;
	struct blk_mq_tags *tags = hctx->tags;

	/*

@@ -56,11 +57,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
		return;
	}

-	spin_lock_irq(&tags->lock);
+	spin_lock_irqsave(&tags->lock, flags);
	users = tags->active_queues + 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
-	spin_unlock_irq(&tags->lock);
+	spin_unlock_irqrestore(&tags->lock, flags);
 }

 /*
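
The rationale for switching to the irqsave variant: spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the caller entered with interrupts already disabled; the irqsave/irqrestore pair preserves whatever state the caller had. A hedged kernel-style sketch of the idiom, with a hypothetical lock and counter:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(stats_lock);  /* hypothetical lock for this sketch */
    static unsigned int active_users;

    /* Safe from any context: preserves the caller's interrupt state. */
    static void stats_inc(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&stats_lock, flags);
            active_users++;
            spin_unlock_irqrestore(&stats_lock, flags); /* restores, never force-enables */
    }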


@@ -1645,19 +1645,19 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
				&hl_data64b_fops);

	debugfs_create_file("set_power_state",
-				0200,
+				0644,
				root,
				dev_entry,
				&hl_power_fops);

	debugfs_create_file("device",
-				0200,
+				0644,
				root,
				dev_entry,
				&hl_device_fops);

	debugfs_create_file("clk_gate",
-				0200,
+				0644,
				root,
				dev_entry,
				&hl_clk_gate_fops);

@@ -1669,13 +1669,13 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
				&hl_stop_on_err_fops);

	debugfs_create_file("dump_security_violations",
-				0644,
+				0400,
				root,
				dev_entry,
				&hl_security_violations_fops);

	debugfs_create_file("dump_razwi_events",
-				0644,
+				0400,
				root,
				dev_entry,
				&hl_razwi_check_fops);

@@ -1708,7 +1708,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
				&hdev->reset_info.skip_reset_on_timeout);

	debugfs_create_file("state_dump",
-				0600,
+				0644,
				root,
				dev_entry,
				&hl_state_dump_fops);

@@ -1726,7 +1726,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
	for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
		debugfs_create_file(hl_debugfs_list[i].name,
-					0444,
+					0644,
					root,
					entry,
					&hl_debugfs_fops);


@@ -271,6 +271,9 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
	free_node->cq_cb = pend->ts_reg_info.cq_cb;
	list_add(&free_node->free_objects_node, *free_list);

+	/* Mark TS record as free */
+	pend->ts_reg_info.in_use = false;
+
	return 0;
 }


@@ -1878,16 +1878,16 @@ err_dma_buf_put:

 static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
 {
-	if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
+	if (!PAGE_ALIGNED(device_addr)) {
		dev_dbg(hdev->dev,
-			"exported device memory address 0x%llx should be aligned to 0x%lx\n",
+			"exported device memory address 0x%llx should be aligned to PAGE_SIZE 0x%lx\n",
			device_addr, PAGE_SIZE);
		return -EINVAL;
	}

-	if (size < PAGE_SIZE) {
+	if (!size || !PAGE_ALIGNED(size)) {
		dev_dbg(hdev->dev,
-			"exported device memory size %llu should be equal to or greater than %lu\n",
+			"exported device memory size %llu should be a multiple of PAGE_SIZE %lu\n",
			size, PAGE_SIZE);
		return -EINVAL;
	}

@@ -1938,6 +1938,13 @@ static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 s
	if (rc)
		return rc;

+	if (!PAGE_ALIGNED(offset)) {
+		dev_dbg(hdev->dev,
+			"exported device memory offset %llu should be a multiple of PAGE_SIZE %lu\n",
+			offset, PAGE_SIZE);
+		return -EINVAL;
+	}
+
	if ((offset + size) > phys_pg_pack->total_size) {
		dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
			offset, size, phys_pg_pack->total_size);


@@ -1601,6 +1601,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
	mmDCORE0_TPC0_CFG_KERNEL_SRF_30,
	mmDCORE0_TPC0_CFG_KERNEL_SRF_31,
	mmDCORE0_TPC0_CFG_TPC_SB_L0CD,
+	mmDCORE0_TPC0_CFG_TPC_COUNT,
	mmDCORE0_TPC0_CFG_TPC_ID,
	mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC,
	mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0,


@@ -188,13 +188,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
		     u8 acpi_ns_is_locked);

 void
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
			    acpi_adr_space_type space_id, u32 function);

-void
-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
-				  acpi_adr_space_type space_id);
-
 acpi_status
 acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);


@@ -20,6 +20,10 @@ extern u8 acpi_gbl_default_address_spaces[];

 /* Local prototypes */

+static void
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
+				  acpi_adr_space_type space_id);
+
 static acpi_status
 acpi_ev_reg_run(acpi_handle obj_handle,
		u32 level, void *context, void **return_value);

@@ -61,6 +65,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
							acpi_gbl_default_address_spaces
							[i])) {
				acpi_ev_execute_reg_methods(acpi_gbl_root_node,
+							    ACPI_UINT32_MAX,
							    acpi_gbl_default_address_spaces
							    [i], ACPI_REG_CONNECT);
			}

@@ -668,6 +673,7 @@ cleanup1:
 * FUNCTION:    acpi_ev_execute_reg_methods
 *
 * PARAMETERS:  node            - Namespace node for the device
+*              max_depth       - Depth to which search for _REG
 *              space_id        - The address space ID
 *              function        - Passed to _REG: On (1) or Off (0)
 *

@@ -679,7 +685,7 @@ cleanup1:
 ******************************************************************************/

 void
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
			    acpi_adr_space_type space_id, u32 function)
 {
	struct acpi_reg_walk_info info;

@@ -713,7 +719,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
	 * regions and _REG methods. (i.e. handlers must be installed for all
	 * regions of this Space ID before we can run any _REG methods)
	 */
-	(void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+	(void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth,
				     ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL,
				     &info, NULL);

@@ -814,7 +820,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
 *
 ******************************************************************************/

-void
+static void
 acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
				  acpi_adr_space_type space_id)
 {


@@ -85,7 +85,8 @@ acpi_install_address_space_handler_internal(acpi_handle device,
	/* Run all _REG methods for this address space */

	if (run_reg) {
-		acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
+		acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id,
+					    ACPI_REG_CONNECT);
	}

 unlock_and_exit:

@@ -263,6 +264,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
 * FUNCTION:    acpi_execute_reg_methods
 *
 * PARAMETERS:  device          - Handle for the device
+*              max_depth       - Depth to which search for _REG
 *              space_id        - The address space ID
 *
 * RETURN:      Status

@@ -271,7 +273,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
 *
 ******************************************************************************/
 acpi_status
-acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
+acpi_execute_reg_methods(acpi_handle device, u32 max_depth,
+			 acpi_adr_space_type space_id)
 {
	struct acpi_namespace_node *node;
	acpi_status status;

@@ -296,7 +299,8 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
		/* Run all _REG methods for this address space */

-		acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
+		acpi_ev_execute_reg_methods(node, max_depth, space_id,
+					    ACPI_REG_CONNECT);
	} else {
		status = AE_BAD_PARAMETER;
	}

@@ -306,57 +310,3 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
 }

 ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_execute_orphan_reg_method
- *
- * PARAMETERS:  device          - Handle for the device
- *              space_id        - The address space ID
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
- *              device. This is a _REG method that has no corresponding region
- *              within the device's scope.
- *
- ******************************************************************************/
-acpi_status
-acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
-
-	/* Parameter validation */
-
-	if (!device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Convert and validate the device handle */
-
-	node = acpi_ns_validate_handle(device);
-	if (node) {
-		/*
-		 * If an "orphan" _REG method is present in the device's scope
-		 * for the given address space ID, run it.
-		 */
-		acpi_ev_execute_orphan_reg_method(node, space_id);
-	} else {
-		status = AE_BAD_PARAMETER;
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)


@@ -1487,12 +1487,13 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
 static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
			       bool call_reg)
 {
-	acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+		acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
+
		acpi_ec_enter_noirq(ec);
		status = acpi_install_address_space_handler_no_reg(scope_handle,
								   ACPI_ADR_SPACE_EC,

@@ -1506,10 +1507,7 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
	}

	if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
-		acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
-		if (scope_handle != ec->handle)
-			acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
-
+		acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC);
		set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
	}

@@ -1724,6 +1722,12 @@ static void acpi_ec_remove(struct acpi_device *device)
	}
 }

+void acpi_ec_register_opregions(struct acpi_device *adev)
+{
+	if (first_ec && first_ec->handle != adev->handle)
+		acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC);
+}
+
 static acpi_status
 ec_parse_io_ports(struct acpi_resource *resource, void *context)
 {


@@ -204,6 +204,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data);
 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+void acpi_ec_register_opregions(struct acpi_device *adev);

 #ifdef CONFIG_PM_SLEEP
 void acpi_ec_flush_work(void);


@@ -2198,6 +2198,8 @@ static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
	if (device->handler)
		goto ok;

+	acpi_ec_register_opregions(device);
+
	if (!device->flags.initialized) {
		device->flags.power_manageable =
			device->power.states[ACPI_STATE_D0].flags.valid;


@@ -1118,8 +1118,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
	rpp->len += skb->len;

	if (stat & SAR_RSQE_EPDU) {
+		unsigned int len, truesize;
		unsigned char *l1l2;
-		unsigned int len;

		l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);

@@ -1189,14 +1189,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
		ATM_SKB(skb)->vcc = vcc;
		__net_timestamp(skb);

+		truesize = skb->truesize;
		vcc->push(vcc, skb);
		atomic_inc(&vcc->stats->rx);

-		if (skb->truesize > SAR_FB_SIZE_3)
+		if (truesize > SAR_FB_SIZE_3)
			add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
-		else if (skb->truesize > SAR_FB_SIZE_2)
+		else if (truesize > SAR_FB_SIZE_2)
			add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
-		else if (skb->truesize > SAR_FB_SIZE_1)
+		else if (truesize > SAR_FB_SIZE_1)
			add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
		else
			add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
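
The underlying bug class here: vcc->push() transfers ownership of the skb to the upper layer, which may free it, so the later skb->truesize reads were a potential use-after-free; the fix snapshots the value first. A generic hedged sketch of the snapshot-before-handoff rule, with hypothetical types:

    /* Sketch: once a buffer is handed off, its fields may no longer be read. */
    struct buffer {
            unsigned int truesize;
            /* ... payload ... */
    };

    /* consume() takes ownership of buf and may free it immediately. */
    extern void consume(struct buffer *buf);

    static unsigned int hand_off(struct buffer *buf)
    {
            unsigned int truesize = buf->truesize; /* snapshot before ownership transfer */

            consume(buf);                          /* buf must not be dereferenced below */
            return truesize;                       /* safe: uses the saved copy */
    }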


@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2");
 static const char xillyname[] = "xillyusb";

 static unsigned int fifo_buf_order;
+static struct workqueue_struct *wakeup_wq;

 #define USB_VENDOR_ID_XILINX		0x03fd
 #define USB_VENDOR_ID_ALTERA		0x09fb

@@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref)
 * errors if executed. The mechanism relies on that xdev->error is assigned
 * a non-zero value by report_io_error() prior to queueing wakeup_all(),
 * which prevents bulk_in_work() from calling process_bulk_in().
- *
- * The fact that wakeup_all() and bulk_in_work() are queued on the same
- * workqueue makes their concurrent execution very unlikely, however the
- * kernel's API doesn't seem to ensure this strictly.
 */

 static void wakeup_all(struct work_struct *work)

@@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev,

	if (do_once) {
		kref_get(&xdev->kref); /* xdev is used by work item */
-		queue_work(xdev->workq, &xdev->wakeup_workitem);
+		queue_work(wakeup_wq, &xdev->wakeup_workitem);
	}
 }

@@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = {

 static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
 {
+	struct usb_device *udev = xdev->udev;
+
+	/* Verify that device has the two fundamental bulk in/out endpoints */
+	if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
+	    usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
+		return -ENODEV;
+
	xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
				      bulk_out_work, 1, 2);
	if (!xdev->msg_ep)

@@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
			  __le16 *chandesc,
			  int num_channels)
 {
-	struct xillyusb_channel *chan;
+	struct usb_device *udev = xdev->udev;
+	struct xillyusb_channel *chan, *new_channels;
	int i;

	chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

-	xdev->channels = chan;
+	new_channels = chan;

	for (i = 0; i < num_channels; i++, chan++) {
		unsigned int in_desc = le16_to_cpu(*chandesc++);

@@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
		 */

		if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
+			if (usb_pipe_type_check(udev,
+						usb_sndbulkpipe(udev, i + 2))) {
+				dev_err(xdev->dev,
+					"Missing BULK OUT endpoint %d\n",
+					i + 2);
+				kfree(new_channels);
+				return -ENODEV;
+			}
+
			chan->writable = 1;
			chan->out_synchronous = !!(out_desc & 0x40);
			chan->out_seekable = !!(out_desc & 0x20);

@@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev,
		}
	}

+	xdev->channels = new_channels;
	return 0;
 }

@@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface)
	 * just after responding with the IDT, there is no reason for any
	 * work item to be running now. To be sure that xdev->channels
	 * is updated on anything that might run in parallel, flush the
-	 * workqueue, which rarely does anything.
+	 * device's workqueue and the wakeup work item. This rarely
+	 * does anything.
	 */
	flush_workqueue(xdev->workq);
+	flush_work(&xdev->wakeup_workitem);

	xdev->num_channels = num_channels;

@@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void)
 {
	int rc = 0;

+	wakeup_wq = alloc_workqueue(xillyname, 0, 0);
+	if (!wakeup_wq)
+		return -ENOMEM;
+
	if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
		fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
	else

@@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void)

	rc = usb_register(&xillyusb_driver);

+	if (rc)
+		destroy_workqueue(wakeup_wq);
+
	return rc;
 }

 static void __exit xillyusb_exit(void)
 {
	usb_deregister(&xillyusb_driver);

+	destroy_workqueue(wakeup_wq);
 }

 module_init(xillyusb_init);


@@ -329,12 +329,12 @@ struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
	if (!ctx)
		return ERR_PTR(-ENOMEM);

-	for (i = 0; i < nr_plls; ++i)
-		ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
-
	ctx->node = np;
	ctx->reg_base = base;
	ctx->clk_data.num = nr_plls;
+	for (i = 0; i < nr_plls; ++i)
+		ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);

	return ctx;
 }


@@ -290,18 +290,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
	switch (event) {
	case PRE_RATE_CHANGE:
	{
-		int psv;
+		unsigned long psv;

-		psv = DIV_ROUND_CLOSEST(ndata->new_rate,
-					gt_target_rate);
-
-		if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+		psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate);
+		if (!psv ||
+		    abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
			return NOTIFY_BAD;

		psv--;

		/* prescaler within legal range? */
-		if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
+		if (psv > GT_CONTROL_PRESCALER_MAX)
			return NOTIFY_BAD;

		/*
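
Two fixes are folded together here: psv becomes unsigned (making the old psv < 0 test dead code), and the new !psv guard stops a divide-by-zero when the new parent rate is far below the target and the prescaler rounds to zero. A small standalone illustration of that rounding edge case:

    #include <stdio.h>

    /* Round-to-nearest integer division, as the kernel's DIV_ROUND_CLOSEST does. */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            unsigned long target = 100000000UL;   /* desired timer rate */
            unsigned long new_rate = 20000000UL;  /* much lower parent clock rate */

            unsigned long psv = DIV_ROUND_CLOSEST(new_rate, target);

            /* new_rate far below target rounds psv to 0; dividing by it would trap. */
            if (!psv)
                    printf("prescaler would be 0 -- reject the rate change\n");
            else
                    printf("resulting rate: %lu\n", new_rate / psv);
            return 0;
    }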


@@ -659,6 +659,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
	memset(&res, 0, sizeof(res));
	res.mce  = mce;
	res.addr = mce->addr & MCI_ADDR_PHYSADDR;
+	if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) {
+		pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
+		return NOTIFY_DONE;
+	}

	/* Try driver decoder first */
	if (!(driver_decode && driver_decode(&res))) {


@@ -522,7 +522,7 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
 {
	cs_dsp_debugfs_clear(dsp);
	debugfs_remove_recursive(dsp->debugfs_root);
-	dsp->debugfs_root = NULL;
+	dsp->debugfs_root = ERR_PTR(-ENODEV);
 }
 EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
 #else

@@ -2343,6 +2343,11 @@ static int cs_dsp_common_init(struct cs_dsp *dsp)

	mutex_init(&dsp->pwr_lock);

+#ifdef CONFIG_DEBUG_FS
+	/* Ensure this is invalid if client never provides a debugfs root */
+	dsp->debugfs_root = ERR_PTR(-ENODEV);
+#endif
+
	return 0;
 }


@@ -39,6 +39,8 @@
 #define MLXBF_GPIO_CAUSE_OR_EVTEN0        0x14
 #define MLXBF_GPIO_CAUSE_OR_CLRCAUSE      0x18

+#define MLXBF_GPIO_CLR_ALL_INTS           GENMASK(31, 0)
+
 struct mlxbf3_gpio_context {
	struct gpio_chip gc;

@@ -82,6 +84,8 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
	val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
	val &= ~BIT(offset);
	writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+
+	writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
	raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);

	gpiochip_disable_irq(gc, offset);

@@ -253,6 +257,15 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
	return 0;
 }

+static void mlxbf3_gpio_shutdown(struct platform_device *pdev)
+{
+	struct mlxbf3_gpio_context *gs = platform_get_drvdata(pdev);
+
+	/* Disable and clear all interrupts */
+	writel(0, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+	writel(MLXBF_GPIO_CLR_ALL_INTS, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
+}
+
 static const struct acpi_device_id mlxbf3_gpio_acpi_match[] = {
	{ "MLNXBF33", 0 },
	{}

@@ -265,6 +278,7 @@ static struct platform_driver mlxbf3_gpio_driver = {
		.acpi_match_table = mlxbf3_gpio_acpi_match,
	},
	.probe    = mlxbf3_gpio_probe,
+	.shutdown = mlxbf3_gpio_shutdown,
 };

 module_platform_driver(mlxbf3_gpio_driver);


@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0

 #include <linux/bitops.h>
+#include <linux/cleanup.h>
 #include <linux/device.h>
 #include <linux/idr.h>
 #include <linux/init.h>

@@ -774,15 +775,15 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
	struct gpio_desc *desc;
	struct gpio_chip *chip = gdev->chip;

-	if (!gdev->mockdev)
-		return;
+	scoped_guard(mutex, &sysfs_lock) {
+		if (!gdev->mockdev)
+			return;

-	device_unregister(gdev->mockdev);
+		device_unregister(gdev->mockdev);

-	/* prevent further gpiod exports */
-	mutex_lock(&sysfs_lock);
-	gdev->mockdev = NULL;
-	mutex_unlock(&sysfs_lock);
+		/* prevent further gpiod exports */
+		gdev->mockdev = NULL;
+	}

	/* unregister gpiod class devices owned by sysfs */
	for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) {


@@ -303,6 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,


@@ -733,7 +733,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
	enum dma_data_direction dir;

	if (unlikely(!ttm->sg)) {
-		pr_err("SG Table of BO is UNEXPECTEDLY NULL");
+		pr_debug("SG Table of BO is NULL");
		return;
	}

@@ -1202,8 +1202,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);
-
-	kfd_mem_dmaunmap_attachment(mem, entry);
 }

 static int update_gpuvm_pte(struct kgd_mem *mem,

@@ -1258,6 +1256,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,

 update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(mem, entry, sync);
+	kfd_mem_dmaunmap_attachment(mem, entry);
	return ret;
 }

@@ -1862,8 +1861,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
-	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
+		kfd_mem_dmaunmap_attachment(mem, entry);
		kfd_mem_detach(entry);
+	}

	ret = unreserve_bo_and_vms(&ctx, false, false);

@@ -2037,6 +2038,37 @@ out:
	return ret;
 }

+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+{
+	struct kfd_mem_attachment *entry;
+	struct amdgpu_vm *vm;
+	int ret;
+
+	vm = drm_priv_to_vm(drm_priv);
+
+	mutex_lock(&mem->lock);
+
+	ret = amdgpu_bo_reserve(mem->bo, true);
+	if (ret)
+		goto out;
+
+	list_for_each_entry(entry, &mem->attachments, list) {
+		if (entry->bo_va->base.vm != vm)
+			continue;
+		if (entry->bo_va->base.bo->tbo.ttm &&
+		    !entry->bo_va->base.bo->tbo.ttm->sg)
+			continue;
+
+		kfd_mem_dmaunmap_attachment(mem, entry);
+	}
+
+	amdgpu_bo_unreserve(mem->bo);
+out:
+	mutex_unlock(&mem->lock);
+
+	return ret;
+}
+
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
 {


@@ -1057,6 +1057,9 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
		r = amdgpu_ring_parse_cs(ring, p, job, ib);
		if (r)
			return r;
+
+		if (ib->sa_bo)
+			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	} else {
		ib->ptr = (uint32_t *)kptr;
		r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);


@@ -684,16 +684,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
+		if (args->in.flags)
+			return -EINVAL;
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
+		if (args->in.flags)
+			return -EINVAL;
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
+		if (args->in.flags)
+			return -EINVAL;
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
+		if (args->in.flags)
+			return -EINVAL;
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:


@@ -166,6 +166,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
	if (ret)
		return -EFAULT;

+	if (ta_bin_len > PSP_1_MEG)
+		return -EINVAL;
+
	copy_pos += sizeof(uint32_t);

	ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);


@@ -135,6 +135,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
		}
	}

+	/* from vcn4 and above, only unified queue is used */
+	adev->vcn.using_unified_queue =
+		adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0);
+
	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

@@ -259,18 +263,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
	return 0;
 }

-/* from vcn4 and above, only unified queue is used */
-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool ret = false;
-
-	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
-		ret = true;
-
-	return ret;
-}
-
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
	bool ret = false;

@@ -380,7 +372,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);

-		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+		    !adev->vcn.using_unified_queue) {
			struct dpg_pause_state new_state;

			if (fence[j] ||

@@ -426,7 +420,9 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
	       AMD_PG_STATE_UNGATE);

-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+	    !adev->vcn.using_unified_queue) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {

@@ -452,8 +448,12 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)

 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 {
+	struct amdgpu_device *adev = ring->adev;
+
+	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
-	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
+	    !adev->vcn.using_unified_queue)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

@@ -707,12 +707,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
	uint32_t *ib_checksum;
	uint32_t ib_pack_in_dw;
	int i, r;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,

@@ -725,7 +724,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
	ib->length_dw = 0;

	/* single queue headers */
-	if (sq) {
+	if (adev->vcn.using_unified_queue) {
		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
				+ 4 + 2; /* engine info + decoding ib in dw */
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);

@@ -744,7 +743,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

	r = amdgpu_job_submit_direct(job, ring, &f);

@@ -834,15 +833,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
					 struct dma_fence **fence)
 {
	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
	int i, r;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,

@@ -856,7 +855,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
	ib->length_dw = 0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;

@@ -878,7 +877,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);

@@ -901,15 +900,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
					  struct dma_fence **fence)
 {
	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
	int i, r;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,

@@ -923,7 +922,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
	ib->length_dw = 0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;

@@ -945,7 +944,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);


@@ -284,6 +284,7 @@ struct amdgpu_vcn {
 	uint16_t inst_mask;
 	uint8_t	num_inst_per_aid;
+	bool using_unified_queue;
 };

 struct amdgpu_fw_shared_rb_ptrs_struct {
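The new using_unified_queue flag caches, once at init time, what the deleted per-submission helper amdgpu_vcn_using_unified_queue(ring) used to recompute on every call. A minimal sketch of how such a flag could be populated at sw-init time; the helper name and IP-version threshold below are illustrative assumptions, not the driver's exact code:

/* Hypothetical init-time setup; the version check is an assumption. */
static void example_vcn_identify_unified_queue(struct amdgpu_device *adev)
{
	/* assumed rule: VCN4 and newer expose one unified dec/enc queue */
	adev->vcn.using_unified_queue =
		adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0);
}

After this, hot paths such as the IB helpers above test a plain bool instead of calling back into ring code.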


@@ -766,11 +766,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
 			 struct amdgpu_vm_bo_base *entry)
 {
 	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
-	struct amdgpu_bo *bo = parent->bo, *pbo;
+	struct amdgpu_bo *bo, *pbo;
 	struct amdgpu_vm *vm = params->vm;
 	uint64_t pde, pt, flags;
 	unsigned int level;

+	if (WARN_ON(!parent))
+		return -EINVAL;
+
+	bo = parent->bo;
 	for (level = 0, pbo = bo->parent; pbo; ++level)
 		pbo = pbo->parent;


@@ -7892,22 +7892,15 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
 					       unsigned int vmid)
 {
-	u32 reg, data;
+	u32 data;

 	/* not for *_SOC15 */
-	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
-	if (amdgpu_sriov_is_pp_one_vf(adev))
-		data = RREG32_NO_KIQ(reg);
-	else
-		data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+	data = RREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL);

 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

-	if (amdgpu_sriov_is_pp_one_vf(adev))
-		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
-	else
-		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+	WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 }

 static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)


@@ -4961,23 +4961,16 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
-	u32 reg, data;
+	u32 data;

 	amdgpu_gfx_off_ctrl(adev, false);

-	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
-	if (amdgpu_sriov_is_pp_one_vf(adev))
-		data = RREG32_NO_KIQ(reg);
-	else
-		data = RREG32(reg);
+	data = RREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL);

 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

-	if (amdgpu_sriov_is_pp_one_vf(adev))
-		WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
-	else
-		WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+	WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);

 	amdgpu_gfx_off_ctrl(adev, true);
 }


@@ -39,7 +39,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");

 static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
 {
-	char fw_name[40];
+	char fw_name[45];
 	char ucode_prefix[30];
 	int err;
 	const struct imu_firmware_header_v1_0 *imu_hdr;


@@ -543,11 +543,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,

 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
 		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));

 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
 		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));

 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
 		0, 0, PACKETJ_TYPE0));


@@ -23,6 +23,7 @@

 #include "amdgpu.h"
 #include "amdgpu_jpeg.h"
+#include "amdgpu_cs.h"
 #include "soc15.h"
 #include "soc15d.h"
 #include "jpeg_v4_0_3.h"
@@ -769,11 +770,15 @@ static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,

 	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
 		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+	if (ring->funcs->parse_cs)
+		amdgpu_ring_write(ring, 0);
+	else
+		amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));

 	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
 		0, 0, PACKETJ_TYPE0));
-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));

 	amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
 		0, 0, PACKETJ_TYPE0));
@@ -1052,6 +1057,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
 	.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
 	.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
 	.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
+	.parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
 	.emit_frame_size =
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1216,3 +1222,56 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
 {
 	adev->jpeg.ras = &jpeg_v4_0_3_ras;
 }
+
+/**
+ * jpeg_v4_0_3_dec_ring_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+				  struct amdgpu_job *job,
+				  struct amdgpu_ib *ib)
+{
+	uint32_t i, reg, res, cond, type;
+	struct amdgpu_device *adev = parser->adev;
+
+	for (i = 0; i < ib->length_dw ; i += 2) {
+		reg  = CP_PACKETJ_GET_REG(ib->ptr[i]);
+		res  = CP_PACKETJ_GET_RES(ib->ptr[i]);
+		cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+		type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+		if (res) /* only support 0 at the moment */
+			return -EINVAL;
+
+		switch (type) {
+		case PACKETJ_TYPE0:
+			if (cond != PACKETJ_CONDITION_CHECK0 ||
+			    reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
+				dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+				return -EINVAL;
+			}
+			break;
+		case PACKETJ_TYPE3:
+			if (cond != PACKETJ_CONDITION_CHECK3 ||
+			    reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
+				dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+				return -EINVAL;
+			}
+			break;
+		case PACKETJ_TYPE6:
+			if (ib->ptr[i] == CP_PACKETJ_NOP)
+				continue;
+			dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+			return -EINVAL;
+		default:
+			dev_err(adev->dev, "Unknown packet type %d !\n", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
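For illustration only, a minimal IB body this parser would accept: one TYPE0 register write whose offset sits inside the allowed JPEG window, padded with a TYPE6 NOP. The payload value is made up:

static const uint32_t example_ib[] = {
	/* TYPE0 write, CHECK0 condition, register at the window start */
	PACKETJ(JPEG_REG_RANGE_START, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0),
	0x12345678,	/* value written to the register */
	CP_PACKETJ_NOP,	/* TYPE6 NOP, accepted and skipped */
	0x00000000,	/* NOP payload, never inspected */
};

Anything else, a nonzero RES field, an out-of-range register, or an unknown packet type, makes the parser fail the submission with -EINVAL.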


@@ -46,6 +46,12 @@

 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR	0x18000

+#define JPEG_REG_RANGE_START	0x4000
+#define JPEG_REG_RANGE_END	0x41c2
+
 extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;

+int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+				  struct amdgpu_job *job,
+				  struct amdgpu_ib *ib);
 #endif /* __JPEG_V4_0_3_H__ */


@@ -76,6 +76,12 @@
 			 ((cond & 0xF) << 24) |				\
 			 ((type & 0xF) << 28))

+#define CP_PACKETJ_NOP		0x60000000
+#define CP_PACKETJ_GET_REG(x)	((x) & 0x3FFFF)
+#define CP_PACKETJ_GET_RES(x)	(((x) >> 18) & 0x3F)
+#define CP_PACKETJ_GET_COND(x)	(((x) >> 24) & 0xF)
+#define CP_PACKETJ_GET_TYPE(x)	(((x) >> 28) & 0xF)
+
 /* Packet 3 types */
 #define	PACKET3_NOP					0x10
 #define	PACKET3_SET_BASE				0x11
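A quick sketch of how the new getters slice a PACKETJ header word apart; the word below is constructed for illustration, not taken from a real stream:

uint32_t hdr = PACKETJ(0x4040, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0);

uint32_t reg  = CP_PACKETJ_GET_REG(hdr);  /* bits 17:0  -> 0x4040 */
uint32_t res  = CP_PACKETJ_GET_RES(hdr);  /* bits 23:18 -> 0 */
uint32_t cond = CP_PACKETJ_GET_COND(hdr); /* bits 27:24 -> CHECK0 */
uint32_t type = CP_PACKETJ_GET_TYPE(hdr); /* bits 31:28 -> TYPE0 */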


@@ -1432,17 +1432,23 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 			goto sync_memory_failed;
 		}
 	}
-	mutex_unlock(&p->mutex);

-	if (flush_tlb) {
 	/* Flush TLBs after waiting for the page table updates to complete */
 	for (i = 0; i < args->n_devices; i++) {
 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
 		if (WARN_ON_ONCE(!peer_pdd))
 			continue;
+		if (flush_tlb)
 			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
+
+		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
+		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+		if (err)
+			goto sync_memory_failed;
 	}
-	}
+
+	mutex_unlock(&p->mutex);

 	kfree(devices_arr);

 	return 0;


@@ -3521,7 +3521,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 				(int)hubp->curs_attr.width || pos_cpy.x
 				<= (int)hubp->curs_attr.width +
 				pipe_ctx->plane_state->src_rect.x) {
-				pos_cpy.x = temp_x + viewport_width;
+				pos_cpy.x = 2 * viewport_width - temp_x;
 			}
 		}
 	} else {
@@ -3614,7 +3614,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 				(int)hubp->curs_attr.width || pos_cpy.x
 				<= (int)hubp->curs_attr.width +
 				pipe_ctx->plane_state->src_rect.x) {
-				pos_cpy.x = 2 * viewport_width - temp_x;
+				pos_cpy.x = temp_x + viewport_width;
 			}
 		}
 	} else {


@@ -1756,6 +1756,9 @@ static bool dcn321_resource_construct(
 	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
 	dc->caps.color.mpc.ocsc = 1;

+	/* Use pipe context based otg sync logic */
+	dc->config.use_pipe_ctx_sync_logic = true;
+
 	dc->config.dc_mode_clk_limit_support = true;
 	/* read VBIOS LTTPR caps */
 	{


@@ -1471,7 +1471,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	return -EINVAL;
 }

-static unsigned int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
+static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
 					    enum amd_pp_sensors sensor,
 					    void *query)
 {
@@ -2787,7 +2787,7 @@ static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
 	return sysfs_emit(buf, "vddnb\n");
 }

-static unsigned int amdgpu_hwmon_get_power(struct device *dev,
+static int amdgpu_hwmon_get_power(struct device *dev,
 				  enum amd_pp_sensors sensor)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
@@ -2809,7 +2809,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 					   struct device_attribute *attr,
 					   char *buf)
 {
-	unsigned int val;
+	int val;

 	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
 	if (val < 0)
@@ -2822,7 +2822,7 @@ static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
 					     struct device_attribute *attr,
 					     char *buf)
 {
-	unsigned int val;
+	int val;

 	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
 	if (val < 0)
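The switch to a signed return type matters because these helpers report failures as negative errnos, and with unsigned int the callers' error checks can never fire:

unsigned int val = -EINVAL;	/* wraps to 4294967274 */

if (val < 0)			/* always false for an unsigned type */
	pr_err("unreachable\n");

With int, the same comparison behaves as intended and the error propagates to the hwmon caller.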


@@ -9,6 +9,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/kernel.h>
+#include <linux/math64.h>
 #include <linux/media-bus-format.h>
 #include <linux/minmax.h>
 #include <linux/module.h>
@@ -157,6 +158,7 @@ struct tc358768_priv {
 	u32 frs;	/* PLL Freqency range for HSCK (post divider) */

 	u32 dsiclk;	/* pll_clk / 2 */
+	u32 pclk;	/* incoming pclk rate */
 };

 static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host
@@ -380,6 +382,7 @@ found:
 	priv->prd = best_prd;
 	priv->frs = frs;
 	priv->dsiclk = best_pll / 2;
+	priv->pclk = mode->clock * 1000;

 	return 0;
 }
@@ -638,6 +641,28 @@ static u32 tc358768_ps_to_ns(u32 ps)
 	return ps / 1000;
 }

+static u32 tc358768_dpi_to_ns(u32 val, u32 pclk)
+{
+	return (u32)div_u64((u64)val * NANO, pclk);
+}
+
+/* Convert value in DPI pixel clock units to DSI byte count */
+static u32 tc358768_dpi_to_dsi_bytes(struct tc358768_priv *priv, u32 val)
+{
+	u64 m = (u64)val * priv->dsiclk / 4 * priv->dsi_lanes;
+	u64 n = priv->pclk;
+
+	return (u32)div_u64(m + n - 1, n);
+}
+
+static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
+{
+	u64 m = (u64)val * NANO;
+	u64 n = priv->dsiclk / 4 * priv->dsi_lanes;
+
+	return (u32)div_u64(m, n);
+}
+
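A worked example of the conversions above, with made-up clocks: 4 DSI lanes and dsiclk = 445.5 MHz give a DSI byte rate of dsiclk / 4 * lanes = 445.5 Mbyte/s; with a 148.5 MHz pixel clock, a span of 100 pixel clocks lasts tc358768_dpi_to_ns(100, 148500000) = 673 ns and maps to tc358768_dpi_to_dsi_bytes(priv, 100) = ceil(100 * 445500000 / 148500000) = 300 byte times. Converting back with tc358768_dsi_bytes_to_ns(priv, 300) again gives 673 ns. Keeping the DPI and DSI views of the same interval equal is the line-time matching that the rework below relies on.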
 static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
 {
 	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -647,11 +672,19 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
 	s32 raw_val;
 	const struct drm_display_mode *mode;
 	u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
-	u32 dsiclk, hsbyteclk, video_start;
-	const u32 internal_delay = 40;
+	u32 dsiclk, hsbyteclk;
 	int ret, i;
 	struct videomode vm;
 	struct device *dev = priv->dev;
+	/* In pixelclock units */
+	u32 dpi_htot, dpi_data_start;
+	/* In byte units */
+	u32 dsi_dpi_htot, dsi_dpi_data_start;
+	u32 dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp;
+	const u32 dsi_hss = 4; /* HSS is a short packet (4 bytes) */
+	/* In hsbyteclk units */
+	u32 dsi_vsdly;
+	const u32 internal_dly = 40;

 	if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
 		dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
@@ -686,27 +719,23 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
 	case MIPI_DSI_FMT_RGB888:
 		val |= (0x3 << 4);
 		hact = vm.hactive * 3;
-		video_start = (vm.hsync_len + vm.hback_porch) * 3;
 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
 		break;
 	case MIPI_DSI_FMT_RGB666:
 		val |= (0x4 << 4);
 		hact = vm.hactive * 3;
-		video_start = (vm.hsync_len + vm.hback_porch) * 3;
 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
 		break;
 	case MIPI_DSI_FMT_RGB666_PACKED:
 		val |= (0x4 << 4) | BIT(3);
 		hact = vm.hactive * 18 / 8;
-		video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8;
 		data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
 		break;
 	case MIPI_DSI_FMT_RGB565:
 		val |= (0x5 << 4);
 		hact = vm.hactive * 2;
-		video_start = (vm.hsync_len + vm.hback_porch) * 2;
 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
 		break;
 	default:
@@ -716,9 +745,152 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
 		return;
 	}

+	/*
+	 * There are three important things to make TC358768 work correctly,
+	 * which are not trivial to manage:
+	 *
+	 * 1. Keep the DPI line-time and the DSI line-time as close to each
+	 *    other as possible.
+	 * 2. TC358768 goes to LP mode after each line's active area. The DSI
+	 *    HFP period has to be long enough for entering and exiting LP mode.
+	 *    But it is not clear how to calculate this.
+	 * 3. VSDly (video start delay) has to be long enough to ensure that the
+	 *    DSI TX does not start transmitting until we have started receiving
+	 *    pixel data from the DPI input. It is not clear how to calculate
+	 *    this either.
+	 */
+
+	dpi_htot = vm.hactive + vm.hfront_porch + vm.hsync_len + vm.hback_porch;
+	dpi_data_start = vm.hsync_len + vm.hback_porch;
+
+	dev_dbg(dev, "dpi horiz timing (pclk): %u + %u + %u + %u = %u\n",
+		vm.hsync_len, vm.hback_porch, vm.hactive, vm.hfront_porch,
+		dpi_htot);
+
+	dev_dbg(dev, "dpi horiz timing (ns): %u + %u + %u + %u = %u\n",
+		tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
+		tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
+		tc358768_dpi_to_ns(vm.hactive, vm.pixelclock),
+		tc358768_dpi_to_ns(vm.hfront_porch, vm.pixelclock),
+		tc358768_dpi_to_ns(dpi_htot, vm.pixelclock));
+
+	dev_dbg(dev, "dpi data start (ns): %u + %u = %u\n",
+		tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
+		tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
+		tc358768_dpi_to_ns(dpi_data_start, vm.pixelclock));
+
+	dsi_dpi_htot = tc358768_dpi_to_dsi_bytes(priv, dpi_htot);
+	dsi_dpi_data_start = tc358768_dpi_to_dsi_bytes(priv, dpi_data_start);
+
+	if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+		dsi_hsw = tc358768_dpi_to_dsi_bytes(priv, vm.hsync_len);
+		dsi_hbp = tc358768_dpi_to_dsi_bytes(priv, vm.hback_porch);
+	} else {
+		/* HBP is included in HSW in event mode */
+		dsi_hbp = 0;
+		dsi_hsw = tc358768_dpi_to_dsi_bytes(priv,
+						    vm.hsync_len +
+						    vm.hback_porch);
+	}
+
+	/*
+	 * The pixel packet includes the actual pixel data, and:
+	 * DSI packet header = 4 bytes
+	 * DCS code = 1 byte
+	 * DSI packet footer = 2 bytes
+	 */
+	dsi_hact = hact + 4 + 1 + 2;
+
+	dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
+
+	/*
+	 * Here we should check if HFP is long enough for entering LP
+	 * and exiting LP, but it's not clear how to calculate that.
+	 * Instead, this is a naive algorithm that just adjusts the HFP
+	 * and HSW so that HFP is (at least) roughly 2/3 of the total
+	 * blanking time.
+	 */
+	if (dsi_hfp < (dsi_hfp + dsi_hsw + dsi_hss) * 2 / 3) {
+		u32 old_hfp = dsi_hfp;
+		u32 old_hsw = dsi_hsw;
+		u32 tot = dsi_hfp + dsi_hsw + dsi_hss;
+
+		dsi_hsw = tot / 3;
+
+		/*
+		 * Seems like sometimes HSW has to be divisible by num-lanes, but
+		 * not always...
+		 */
+		dsi_hsw = roundup(dsi_hsw, priv->dsi_lanes);
+
+		dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
+
+		dev_dbg(dev,
+			"hfp too short, adjusting dsi hfp and dsi hsw from %u, %u to %u, %u\n",
+			old_hfp, old_hsw, dsi_hfp, dsi_hsw);
+	}
+
+	dev_dbg(dev,
+		"dsi horiz timing (bytes): %u, %u + %u + %u + %u = %u\n",
+		dsi_hss, dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp,
+		dsi_hss + dsi_hsw + dsi_hbp + dsi_hact + dsi_hfp);
+
+	dev_dbg(dev, "dsi horiz timing (ns): %u + %u + %u + %u + %u = %u\n",
+		tc358768_dsi_bytes_to_ns(priv, dsi_hss),
+		tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
+		tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
+		tc358768_dsi_bytes_to_ns(priv, dsi_hact),
+		tc358768_dsi_bytes_to_ns(priv, dsi_hfp),
+		tc358768_dsi_bytes_to_ns(priv, dsi_hss + dsi_hsw +
+					 dsi_hbp + dsi_hact + dsi_hfp));
+
+	/* VSDly calculation */
+
+	/* Start with the HW internal delay */
+	dsi_vsdly = internal_dly;
+
+	/* Convert to byte units as the other variables are in byte units */
+	dsi_vsdly *= priv->dsi_lanes;
+
+	/* Do we need more delay, in addition to the internal? */
+	if (dsi_dpi_data_start > dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp) {
+		dsi_vsdly = dsi_dpi_data_start - dsi_hss - dsi_hsw - dsi_hbp;
+		dsi_vsdly = roundup(dsi_vsdly, priv->dsi_lanes);
+	}
+
+	dev_dbg(dev, "dsi data start (bytes) %u + %u + %u + %u = %u\n",
+		dsi_vsdly, dsi_hss, dsi_hsw, dsi_hbp,
+		dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp);
+
+	dev_dbg(dev, "dsi data start (ns) %u + %u + %u + %u = %u\n",
+		tc358768_dsi_bytes_to_ns(priv, dsi_vsdly),
+		tc358768_dsi_bytes_to_ns(priv, dsi_hss),
+		tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
+		tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
+		tc358768_dsi_bytes_to_ns(priv, dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp));
+
+	/* Convert back to hsbyteclk */
+	dsi_vsdly /= priv->dsi_lanes;
+
+	/*
+	 * The docs say that there is an internal delay of 40 cycles.
+	 * However, we get underflows if we follow that rule. If we
+	 * instead ignore the internal delay, things work. So either
+	 * the docs are wrong or the calculations are wrong.
+	 *
+	 * As a temporary fix, add the internal delay here, to counter
+	 * the subtraction when writing the register.
+	 */
+	dsi_vsdly += internal_dly;
+
+	/* Clamp to the register max */
+	if (dsi_vsdly - internal_dly > 0x3ff) {
+		dev_warn(dev, "VSDly too high, underflows likely\n");
+		dsi_vsdly = 0x3ff + internal_dly;
+	}
+
 	/* VSDly[9:0] */
-	video_start = max(video_start, internal_delay + 1) - internal_delay;
-	tc358768_write(priv, TC358768_VSDLY, video_start);
+	tc358768_write(priv, TC358768_VSDLY, dsi_vsdly - internal_dly);

 	tc358768_write(priv, TC358768_DATAFMT, val);
 	tc358768_write(priv, TC358768_DSITX_DT, data_type);
@@ -826,18 +998,6 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)

 		/* vbp */
 		tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
-
-		/* hsw * byteclk * ndl / pclk */
-		val = (u32)div_u64(vm.hsync_len *
-				   (u64)hsbyteclk * priv->dsi_lanes,
-				   vm.pixelclock);
-		tc358768_write(priv, TC358768_DSI_HSW, val);
-
-		/* hbp * byteclk * ndl / pclk */
-		val = (u32)div_u64(vm.hback_porch *
-				   (u64)hsbyteclk * priv->dsi_lanes,
-				   vm.pixelclock);
-		tc358768_write(priv, TC358768_DSI_HBPR, val);
 	} else {
 		/* Set event mode */
 		tc358768_write(priv, TC358768_DSI_EVENT, 1);
@@ -851,17 +1011,14 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)

 		/* vbp (not used in event mode) */
 		tc358768_write(priv, TC358768_DSI_VBPR, 0);
-
-		/* (hsw + hbp) * byteclk * ndl / pclk */
-		val = (u32)div_u64((vm.hsync_len + vm.hback_porch) *
-				   (u64)hsbyteclk * priv->dsi_lanes,
-				   vm.pixelclock);
-		tc358768_write(priv, TC358768_DSI_HSW, val);
-
-		/* hbp (not used in event mode) */
-		tc358768_write(priv, TC358768_DSI_HBPR, 0);
 	}

+	/* hsw (bytes) */
+	tc358768_write(priv, TC358768_DSI_HSW, dsi_hsw);
+
+	/* hbp (bytes) */
+	tc358768_write(priv, TC358768_DSI_HBPR, dsi_hbp);
+
 	/* hact (bytes) */
 	tc358768_write(priv, TC358768_DSI_HACT, hact);


@@ -166,6 +166,11 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
 	gp_write(LIMA_GP_CMD, cmd);
 }

+static int lima_gp_bus_stop_poll(struct lima_ip *ip)
+{
+	return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED);
+}
+
 static int lima_gp_hard_reset_poll(struct lima_ip *ip)
 {
 	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
@@ -179,6 +184,13 @@ static int lima_gp_hard_reset(struct lima_ip *ip)

 	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
 	gp_write(LIMA_GP_INT_MASK, 0);
+
+	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS);
+	ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100);
+	if (ret) {
+		dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
+		return ret;
+	}
+
 	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
 	ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
 	if (ret) {
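For context, lima_poll_timeout(ip, poll, min_usec, timeout_usec) as used above repeatedly evaluates the predicate until it reports true or the time budget runs out. A simplified stand-in, not the driver's actual implementation, behaves roughly like:

static int example_poll_timeout(struct lima_ip *ip,
				int (*poll)(struct lima_ip *),
				int timeout_us)
{
	while (timeout_us-- > 0) {
		if (poll(ip))
			return 0;	/* condition met */
		udelay(1);
	}
	return -ETIMEDOUT;
}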


@@ -124,6 +124,8 @@ enum dpu_enc_rc_states {
  * @base: drm_encoder base class for registration with DRM
  * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @enabled: True if the encoder is active, protected by enc_lock
+ * @commit_done_timedout: True if there has been a timeout on commit after
+ *			enabling the encoder.
  * @num_phys_encs: Actual number of physical encoders contained.
  * @phys_encs: Container of physical encoders managed.
  * @cur_master: Pointer to the current master in this mode. Optimization
@@ -172,6 +174,7 @@ struct dpu_encoder_virt {
 	spinlock_t enc_spinlock;

 	bool enabled;
+	bool commit_done_timedout;

 	unsigned int num_phys_encs;
 	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
@@ -1116,8 +1119,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 	cstate->num_mixers = num_lm;

-	dpu_enc->connector = conn_state->connector;
-
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1210,6 +1211,11 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
 	dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);

 	mutex_lock(&dpu_enc->enc_lock);
+
+	dpu_enc->commit_done_timedout = false;
+
+	dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+
 	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

 	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
@@ -1265,7 +1271,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
 	trace_dpu_enc_disable(DRMID(drm_enc));

 	/* wait for idle */
-	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+	dpu_encoder_wait_for_tx_complete(drm_enc);

 	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
@@ -2172,6 +2178,7 @@ static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
 }

 static int dpu_encoder_virt_add_phys_encs(
+		struct drm_device *dev,
 		struct msm_display_info *disp_info,
 		struct dpu_encoder_virt *dpu_enc,
 		struct dpu_enc_phys_init_params *params)
@@ -2193,7 +2200,7 @@ static int dpu_encoder_virt_add_phys_encs(

 	if (disp_info->intf_type == INTF_WB) {
-		enc = dpu_encoder_phys_wb_init(params);
+		enc = dpu_encoder_phys_wb_init(dev, params);

 		if (IS_ERR(enc)) {
 			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
@@ -2204,7 +2211,7 @@ static int dpu_encoder_virt_add_phys_encs(
 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
 		++dpu_enc->num_phys_encs;
 	} else if (disp_info->is_cmd_mode) {
-		enc = dpu_encoder_phys_cmd_init(params);
+		enc = dpu_encoder_phys_cmd_init(dev, params);

 		if (IS_ERR(enc)) {
 			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
@@ -2215,7 +2222,7 @@ static int dpu_encoder_virt_add_phys_encs(
 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
 		++dpu_enc->num_phys_encs;
 	} else {
-		enc = dpu_encoder_phys_vid_init(params);
+		enc = dpu_encoder_phys_vid_init(dev, params);

 		if (IS_ERR(enc)) {
 			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
@@ -2304,7 +2311,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
 		break;
 	}

-	ret = dpu_encoder_virt_add_phys_encs(disp_info,
+	ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
 			dpu_enc, &phys_params);
 	if (ret) {
 		DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
@@ -2416,10 +2423,18 @@ fail:
 	return ERR_PTR(ret);
 }

-int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
-	enum msm_event_wait event)
+/**
+ * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state
+ * @drm_enc: encoder pointer
+ *
+ * Wait for hardware to have flushed the current pending changes to hardware at
+ * a vblank or CTL_START. Physical encoders will map this differently depending
+ * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
 {
-	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	int i, ret = 0;
@@ -2433,26 +2448,51 @@ int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

-		switch (event) {
-		case MSM_ENC_COMMIT_DONE:
-			fn_wait = phys->ops.wait_for_commit_done;
-			break;
-		case MSM_ENC_TX_COMPLETE:
-			fn_wait = phys->ops.wait_for_tx_complete;
-			break;
-		case MSM_ENC_VBLANK:
-			fn_wait = phys->ops.wait_for_vblank;
-			break;
-		default:
-			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
-					event);
-			return -EINVAL;
-		}
-
-		if (fn_wait) {
-			DPU_ATRACE_BEGIN("wait_for_completion_event");
-			ret = fn_wait(phys);
-			DPU_ATRACE_END("wait_for_completion_event");
+		if (phys->ops.wait_for_commit_done) {
+			DPU_ATRACE_BEGIN("wait_for_commit_done");
+			ret = phys->ops.wait_for_commit_done(phys);
+			DPU_ATRACE_END("wait_for_commit_done");
+			if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) {
+				dpu_enc->commit_done_timedout = true;
+				msm_disp_snapshot_state(drm_enc->dev);
+			}
+			if (ret)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel
+ * @drm_enc: encoder pointer
+ *
+ * Wait for the hardware to transfer all the pixels to the panel. Physical
+ * encoders will map this differently depending on the type: vid mode -> vsync_irq,
+ * cmd mode -> pp_done.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int i, ret = 0;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys->ops.wait_for_tx_complete) {
+			DPU_ATRACE_BEGIN("wait_for_tx_complete");
+			ret = phys->ops.wait_for_tx_complete(phys);
+			DPU_ATRACE_END("wait_for_tx_complete");
 			if (ret)
 				return ret;
 		}


@@ -93,25 +93,9 @@ void dpu_encoder_kickoff(struct drm_encoder *encoder);
  */
 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);

-/**
- * dpu_encoder_wait_for_event - Waits for encoder events
- * @encoder:	encoder pointer
- * @event:      event to wait for
- * MSM_ENC_COMMIT_DONE -  Wait for hardware to have flushed the current pending
- *                        frames to hardware at a vblank or ctl_start
- *                        Encoders will map this differently depending on the
- *                        panel type.
- *                        vid mode -> vsync_irq
- *                        cmd mode -> ctl_start
- * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
- *                        the panel. Encoders will map this differently
- *                        depending on the panel type.
- *                        vid mode -> vsync_irq
- *                        cmd mode -> pp_done
- * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
- */
-int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
-						enum msm_event_wait event);
+int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
+
+int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder);

 /*
  * dpu_encoder_get_intf_mode - get interface mode of the given encoder


@@ -106,7 +106,6 @@ struct dpu_encoder_phys_ops {
 	int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
 	int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
 	int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
-	int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
 	void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
 	void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
 	void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
@@ -281,22 +280,24 @@ struct dpu_encoder_wait_info {
  * @p:	Pointer to init params structure
  * Return: Error code or newly allocated encoder
  */
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p);

 /**
  * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @dev:  Corresponding device for devres management
  * @p:	Pointer to init params structure
  * Return: Error code or newly allocated encoder
  */
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p);

 /**
  * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev:  Corresponding device for devres management
  * @init:	Pointer to init info structure with initialization params
  */
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p);

 /**


@@ -13,6 +13,8 @@
 #include "dpu_trace.h"
 #include "disp/msm_disp_snapshot.h"

+#include <drm/drm_managed.h>
+
 #define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
 		(e)->base.parent->base.id : -1, \
@@ -564,14 +566,6 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
 	phys_enc->enable_state = DPU_ENC_DISABLED;
 }

-static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
-{
-	struct dpu_encoder_phys_cmd *cmd_enc =
-		to_dpu_encoder_phys_cmd(phys_enc);
-
-	kfree(cmd_enc);
-}
-
 static void dpu_encoder_phys_cmd_prepare_for_kickoff(
 		struct dpu_encoder_phys *phys_enc)
 {
@@ -687,33 +681,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
 	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
 }

-static int dpu_encoder_phys_cmd_wait_for_vblank(
-		struct dpu_encoder_phys *phys_enc)
-{
-	int rc = 0;
-	struct dpu_encoder_phys_cmd *cmd_enc;
-	struct dpu_encoder_wait_info wait_info;
-
-	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
-	/* only required for master controller */
-	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
-		return rc;
-
-	wait_info.wq = &cmd_enc->pending_vblank_wq;
-	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
-	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
-
-	atomic_inc(&cmd_enc->pending_vblank_cnt);
-
-	rc = dpu_encoder_helper_wait_for_irq(phys_enc,
-			phys_enc->irq[INTR_IDX_RDPTR],
-			dpu_encoder_phys_cmd_te_rd_ptr_irq,
-			&wait_info);
-
-	return rc;
-}
-
 static void dpu_encoder_phys_cmd_handle_post_kickoff(
 		struct dpu_encoder_phys *phys_enc)
 {
@@ -737,12 +704,10 @@ static void dpu_encoder_phys_cmd_init_ops(
 	ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
 	ops->enable = dpu_encoder_phys_cmd_enable;
 	ops->disable = dpu_encoder_phys_cmd_disable;
-	ops->destroy = dpu_encoder_phys_cmd_destroy;
 	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
 	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
 	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
 	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
-	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
 	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
 	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
 	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
@@ -752,7 +717,7 @@ static void dpu_encoder_phys_cmd_init_ops(
 	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
 }

-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p)
 {
 	struct dpu_encoder_phys *phys_enc = NULL;
@@ -760,7 +725,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
 	DPU_DEBUG("intf\n");

-	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+	cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
 	if (!cmd_enc) {
 		DPU_ERROR("failed to allocate\n");
 		return ERR_PTR(-ENOMEM);


@@ -11,6 +11,8 @@
 #include "dpu_trace.h"
 #include "disp/msm_disp_snapshot.h"

+#include <drm/drm_managed.h>
+
 #define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
 		(e) && (e)->parent ? \
 		(e)->parent->base.id : -1, \
@@ -441,13 +443,7 @@ skip_flush:
 	phys_enc->enable_state = DPU_ENC_ENABLING;
 }

-static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
-{
-	DPU_DEBUG_VIDENC(phys_enc, "\n");
-	kfree(phys_enc);
-}
-
-static int dpu_encoder_phys_vid_wait_for_vblank(
+static int dpu_encoder_phys_vid_wait_for_tx_complete(
 		struct dpu_encoder_phys *phys_enc)
 {
 	struct dpu_encoder_wait_info wait_info;
@@ -561,7 +557,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
 	 * scanout buffer) don't latch properly..
 	 */
 	if (dpu_encoder_phys_vid_is_master(phys_enc)) {
-		ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+		ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
 		if (ret) {
 			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
 			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -581,7 +577,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
 		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 		dpu_encoder_phys_inc_pending(phys_enc);
 		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-		ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+		ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
 		if (ret) {
 			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
 			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -684,11 +680,9 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
 	ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
 	ops->enable = dpu_encoder_phys_vid_enable;
 	ops->disable = dpu_encoder_phys_vid_disable;
-	ops->destroy = dpu_encoder_phys_vid_destroy;
 	ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
 	ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
-	ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
-	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
 	ops->irq_control = dpu_encoder_phys_vid_irq_control;
 	ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
 	ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
@@ -697,7 +691,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
 	ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
 }

-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p)
 {
 	struct dpu_encoder_phys *phys_enc = NULL;
@@ -707,7 +701,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
 		return ERR_PTR(-EINVAL);
 	}

-	phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+	phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
 	if (!phys_enc) {
 		DPU_ERROR("failed to create encoder due to memory allocation error\n");
 		return ERR_PTR(-ENOMEM);


@@ -8,6 +8,7 @@
 #include <linux/debugfs.h>

 #include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>

 #include "dpu_encoder_phys.h"
 #include "dpu_formats.h"
@@ -546,20 +547,6 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
 	phys_enc->enable_state = DPU_ENC_DISABLED;
 }

-/**
- * dpu_encoder_phys_wb_destroy - destroy writeback encoder
- * @phys_enc:	Pointer to physical encoder
- */
-static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
-{
-	if (!phys_enc)
-		return;
-
-	DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
-
-	kfree(phys_enc);
-}
-
 static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
 		struct drm_writeback_job *job)
 {
@@ -655,7 +642,6 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
 	ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
 	ops->enable = dpu_encoder_phys_wb_enable;
 	ops->disable = dpu_encoder_phys_wb_disable;
-	ops->destroy = dpu_encoder_phys_wb_destroy;
 	ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
 	ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
 	ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
@@ -671,9 +657,10 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)

 /**
  * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev:  Corresponding device for devres management
  * @p:	Pointer to init info structure with initialization params
  */
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p)
 {
 	struct dpu_encoder_phys *phys_enc = NULL;
@@ -686,7 +673,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
 		return ERR_PTR(-EINVAL);
 	}

-	wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+	wb_enc = drmm_kzalloc(dev, sizeof(*wb_enc), GFP_KERNEL);
 	if (!wb_enc) {
 		DPU_ERROR("failed to allocate wb phys_enc enc\n");
 		return ERR_PTR(-ENOMEM);


@@ -490,7 +490,7 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
 		 * mode panels. This may be a no-op for command mode panels.
 		 */
 		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
-		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+		ret = dpu_encoder_wait_for_commit_done(encoder);
 		if (ret && ret != -EWOULDBLOCK) {
 			DPU_ERROR("wait for commit done returned %d\n", ret);
 			break;


@@ -31,24 +31,14 @@
  * @fmt: Pointer to format string
  */
 #define DPU_DEBUG(fmt, ...)                                                \
-	do {                                                               \
-		if (drm_debug_enabled(DRM_UT_KMS))                         \
-			DRM_DEBUG(fmt, ##__VA_ARGS__);                     \
-		else                                                       \
-			pr_debug(fmt, ##__VA_ARGS__);                      \
-	} while (0)
+	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

 /**
  * DPU_DEBUG_DRIVER - macro for hardware driver logging
  * @fmt: Pointer to format string
  */
 #define DPU_DEBUG_DRIVER(fmt, ...)                                        \
-	do {                                                               \
-		if (drm_debug_enabled(DRM_UT_DRIVER))                      \
-			DRM_ERROR(fmt, ##__VA_ARGS__);                     \
-		else                                                       \
-			pr_debug(fmt, ##__VA_ARGS__);                      \
-	} while (0)
+	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

 #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
 #define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)


@@ -679,6 +679,9 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
 			new_state->fb, &layout);
 	if (ret) {
 		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+		if (pstate->aspace)
+			msm_framebuffer_cleanup(new_state->fb, pstate->aspace,
+						pstate->needs_dirtyfb);
 		return ret;
 	}

@@ -792,6 +795,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 								   plane);
 	int ret = 0, min_scale;
 	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+	u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate;
 	struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
 	struct dpu_sw_pipe *pipe = &pstate->pipe;
 	struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
@@ -860,14 +865,20 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,

 	max_linewidth = pdpu->catalog->caps->max_linewidth;

-	if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
+	drm_rect_rotate(&pipe_cfg->src_rect,
+			new_plane_state->fb->width, new_plane_state->fb->height,
+			new_plane_state->rotation);
+
+	if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
+	     _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
 		/*
 		 * In parallel multirect case only the half of the usual width
 		 * is supported for tiled formats. If we are here, we know that
 		 * full width is more than max_linewidth, thus each rect is
 		 * wider than allowed.
 		 */
-		if (DPU_FORMAT_IS_UBWC(fmt)) {
+		if (DPU_FORMAT_IS_UBWC(fmt) &&
+		    drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
 			DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
 					DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
 			return -E2BIG;
@@ -907,6 +918,14 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 		r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
 	}

+	drm_rect_rotate_inv(&pipe_cfg->src_rect,
+			    new_plane_state->fb->width, new_plane_state->fb->height,
+			    new_plane_state->rotation);
+	if (r_pipe->sspp)
+		drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
+				    new_plane_state->fb->width, new_plane_state->fb->height,
+				    new_plane_state->rotation);
+
 	ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
 	if (ret)
 		return ret;
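The rotate/check/rotate-back sequence exists because the line-width and MDP-clock limits apply to the rectangle as it will be fetched after rotation, while the rest of atomic_check works in unrotated framebuffer coordinates. A generic sketch of the pattern, with placeholder dimensions:

struct drm_rect r = DRM_RECT_INIT(0, 0, 1080, 1920);

/* map into rotated coordinates before validating */
drm_rect_rotate(&r, fb_width, fb_height, DRM_MODE_ROTATE_90);
/* ... compare drm_rect_width(&r) against max_linewidth ... */
/* undo the mapping before the rect is used to program the pipe */
drm_rect_rotate_inv(&r, fb_width, fb_height, DRM_MODE_ROTATE_90);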


@@ -1253,6 +1253,8 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
 	link_info.rate = ctrl->link->link_params.rate;
 	link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;

+	dp_link_reset_phy_params_vx_px(ctrl->link);
+
 	dp_aux_link_configure(ctrl->aux, &link_info);

 	if (drm_dp_max_downspread(dpcd))


@@ -136,22 +136,22 @@ end:
 static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
 		u32 mode_edid_bpp, u32 mode_pclk_khz)
 {
-	struct dp_link_info *link_info;
+	const struct dp_link_info *link_info;
 	const u32 max_supported_bpp = 30, min_supported_bpp = 18;
-	u32 bpp = 0, data_rate_khz = 0;
+	u32 bpp, data_rate_khz;

-	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+	bpp = min(mode_edid_bpp, max_supported_bpp);

 	link_info = &dp_panel->link_info;
 	data_rate_khz = link_info->num_lanes * link_info->rate * 8;

-	while (bpp > min_supported_bpp) {
+	do {
 		if (mode_pclk_khz * bpp <= data_rate_khz)
-			break;
-		bpp -= 6;
-	}
+			return bpp;

-	return bpp;
+		bpp -= 6;
+	} while (bpp > min_supported_bpp);
+
+	return min_supported_bpp;
 }
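Worked numbers for the loop above, with a hypothetical link: 2 lanes at rate 270000 and a 148500 kHz mode give

data_rate_khz = 2 * 270000 * 8 = 4320000
30 bpp: 148500 * 30 = 4455000 > 4320000  -> step down
24 bpp: 148500 * 24 = 3564000 <= 4320000 -> return 24

A mode that never fits now falls out of the do/while and yields min_supported_bpp (18) instead of whatever residue was left in bpp.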
static int dp_panel_update_modes(struct drm_connector *connector, static int dp_panel_update_modes(struct drm_connector *connector,
@ -444,8 +444,9 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
drm_mode->clock); drm_mode->clock);
drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp); drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
dp_panel->dp_mode.bpp = max_t(u32, 18, dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp,
min_t(u32, dp_panel->dp_mode.bpp, 30)); dp_panel->dp_mode.drm_mode.clock);
drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n", drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
dp_panel->dp_mode.bpp); dp_panel->dp_mode.bpp);
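
Taken together, the two dp_panel.c hunks make dp_panel_get_supported_bpp() the single place that picks a bpp: it steps down from the EDID depth in units of 6 and either returns the first value the link bandwidth can carry or the 18 bpp floor, and dp_panel_init_panel_info() now calls it instead of open-coding a clamp. A minimal standalone sketch of that selection logic (illustrative names and numbers, not the driver's symbols):

#include <assert.h>

/* Step down from the EDID bpp in units of 6 until pclk * bpp fits the
 * link data rate; never return less than the 18 bpp floor. */
static unsigned int pick_bpp(unsigned int edid_bpp, unsigned int pclk_khz,
			     unsigned int data_rate_khz)
{
	const unsigned int max_bpp = 30, min_bpp = 18;
	unsigned int bpp = edid_bpp < max_bpp ? edid_bpp : max_bpp;

	do {
		if (pclk_khz * bpp <= data_rate_khz)
			return bpp;	/* first depth the link can carry */
		bpp -= 6;
	} while (bpp > min_bpp);

	return min_bpp;			/* nothing fit: clamp to the floor */
}

int main(void)
{
	assert(pick_bpp(30, 100000, 4000000) == 30);	/* fits at full depth */
	assert(pick_bpp(30, 200000, 4900000) == 24);	/* steps down once */
	assert(pick_bpp(30, 999999, 1) == 18);		/* nothing fits: floor */
	return 0;
}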

drivers/gpu/drm/msm/msm_drv.h

@@ -74,18 +74,6 @@ enum msm_dsi_controller {
 #define MSM_GPU_MAX_RINGS 4
 #define MAX_H_TILES_PER_DISPLAY 2
 
-/**
- * enum msm_event_wait - type of HW events to wait for
- * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
- * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
- * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
- */
-enum msm_event_wait {
-	MSM_ENC_COMMIT_DONE = 0,
-	MSM_ENC_TX_COMPLETE,
-	MSM_ENC_VBLANK,
-};
-
 /**
  * struct msm_display_topology - defines a display topology pipeline
  * @num_lm: number of layer mixers used

drivers/gpu/drm/msm/msm_gem_shrinker.c

@@ -76,7 +76,7 @@ static bool
 wait_for_idle(struct drm_gem_object *obj)
 {
 	enum dma_resv_usage usage = dma_resv_usage_rw(true);
-	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+	return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
 }
 
 static bool

drivers/gpu/drm/msm/msm_mdss.c

@@ -28,6 +28,8 @@
 
 #define MIN_IB_BW	400000000UL /* Min ib vote 400MB */
 
+#define DEFAULT_REG_BW	153600 /* Used in mdss fbdev driver */
+
 struct msm_mdss {
 	struct device *dev;
@@ -40,8 +42,9 @@ struct msm_mdss {
 		struct irq_domain *domain;
 	} irq_controller;
 	const struct msm_mdss_data *mdss_data;
-	struct icc_path *path[2];
-	u32 num_paths;
+	struct icc_path *mdp_path[2];
+	u32 num_mdp_paths;
+	struct icc_path *reg_bus_path;
 };
 
 static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
@@ -49,38 +52,34 @@ static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
 {
 	struct icc_path *path0;
 	struct icc_path *path1;
+	struct icc_path *reg_bus_path;
 
-	path0 = of_icc_get(dev, "mdp0-mem");
+	path0 = devm_of_icc_get(dev, "mdp0-mem");
 	if (IS_ERR_OR_NULL(path0))
 		return PTR_ERR_OR_ZERO(path0);
 
-	msm_mdss->path[0] = path0;
-	msm_mdss->num_paths = 1;
+	msm_mdss->mdp_path[0] = path0;
+	msm_mdss->num_mdp_paths = 1;
 
-	path1 = of_icc_get(dev, "mdp1-mem");
+	path1 = devm_of_icc_get(dev, "mdp1-mem");
 	if (!IS_ERR_OR_NULL(path1)) {
-		msm_mdss->path[1] = path1;
-		msm_mdss->num_paths++;
+		msm_mdss->mdp_path[1] = path1;
+		msm_mdss->num_mdp_paths++;
 	}
 
+	reg_bus_path = of_icc_get(dev, "cpu-cfg");
+	if (!IS_ERR_OR_NULL(reg_bus_path))
+		msm_mdss->reg_bus_path = reg_bus_path;
+
 	return 0;
 }
 
-static void msm_mdss_put_icc_path(void *data)
-{
-	struct msm_mdss *msm_mdss = data;
-	int i;
-
-	for (i = 0; i < msm_mdss->num_paths; i++)
-		icc_put(msm_mdss->path[i]);
-}
-
 static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
 {
 	int i;
 
-	for (i = 0; i < msm_mdss->num_paths; i++)
-		icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
+		icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(bw));
 }
 
 static void msm_mdss_irq(struct irq_desc *desc)
@@ -245,6 +244,13 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
 	 */
 	msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
 
+	if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
+		icc_set_bw(msm_mdss->reg_bus_path, 0,
+			   msm_mdss->mdss_data->reg_bus_bw);
+	else
+		icc_set_bw(msm_mdss->reg_bus_path, 0,
+			   DEFAULT_REG_BW);
+
 	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
 	if (ret) {
 		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
@@ -298,6 +304,9 @@ static int msm_mdss_disable(struct msm_mdss *msm_mdss)
 	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
 	msm_mdss_icc_request_bw(msm_mdss, 0);
 
+	if (msm_mdss->reg_bus_path)
+		icc_set_bw(msm_mdss->reg_bus_path, 0, 0);
+
 	return 0;
 }
@@ -384,6 +393,8 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
 	if (!msm_mdss)
 		return ERR_PTR(-ENOMEM);
 
+	msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);
+
 	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
 	if (IS_ERR(msm_mdss->mmio))
 		return ERR_CAST(msm_mdss->mmio);
@@ -391,9 +402,6 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
 	dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
 
 	ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -477,8 +485,6 @@ static int mdss_probe(struct platform_device *pdev)
 	if (IS_ERR(mdss))
 		return PTR_ERR(mdss);
 
-	mdss->mdss_data = of_device_get_match_data(&pdev->dev);
-
 	platform_set_drvdata(pdev, mdss);
 
 	/*
@@ -512,18 +518,21 @@ static const struct msm_mdss_data msm8998_data = {
 	.ubwc_enc_version = UBWC_1_0,
 	.ubwc_dec_version = UBWC_1_0,
 	.highest_bank_bit = 2,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data qcm2290_data = {
 	/* no UBWC */
 	.highest_bank_bit = 0x2,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sc7180_data = {
 	.ubwc_enc_version = UBWC_2_0,
 	.ubwc_dec_version = UBWC_2_0,
 	.ubwc_static = 0x1e,
-	.highest_bank_bit = 0x3,
+	.highest_bank_bit = 0x1,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sc7280_data = {
@@ -533,6 +542,7 @@ static const struct msm_mdss_data sc7280_data = {
 	.ubwc_static = 1,
 	.highest_bank_bit = 1,
 	.macrotile_mode = 1,
+	.reg_bus_bw = 74000,
 };
 
 static const struct msm_mdss_data sc8180x_data = {
@@ -540,6 +550,7 @@ static const struct msm_mdss_data sc8180x_data = {
 	.ubwc_dec_version = UBWC_3_0,
 	.highest_bank_bit = 3,
 	.macrotile_mode = 1,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sc8280xp_data = {
@@ -549,12 +560,14 @@ static const struct msm_mdss_data sc8280xp_data = {
 	.ubwc_static = 1,
 	.highest_bank_bit = 2,
 	.macrotile_mode = 1,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sdm845_data = {
 	.ubwc_enc_version = UBWC_2_0,
 	.ubwc_dec_version = UBWC_2_0,
 	.highest_bank_bit = 2,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sm6350_data = {
@@ -563,12 +576,14 @@ static const struct msm_mdss_data sm6350_data = {
 	.ubwc_swizzle = 6,
 	.ubwc_static = 0x1e,
 	.highest_bank_bit = 1,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sm8150_data = {
 	.ubwc_enc_version = UBWC_3_0,
 	.ubwc_dec_version = UBWC_3_0,
 	.highest_bank_bit = 2,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sm6115_data = {
@@ -577,6 +592,7 @@ static const struct msm_mdss_data sm6115_data = {
 	.ubwc_swizzle = 7,
 	.ubwc_static = 0x11f,
 	.highest_bank_bit = 0x1,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sm6125_data = {
@@ -584,6 +600,7 @@ static const struct msm_mdss_data sm6125_data = {
 	.ubwc_dec_version = UBWC_3_0,
 	.ubwc_swizzle = 1,
 	.highest_bank_bit = 1,
+	.reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sm8250_data = {
@@ -594,6 +611,18 @@ static const struct msm_mdss_data sm8250_data = {
 	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
 	.highest_bank_bit = 3,
 	.macrotile_mode = 1,
+	.reg_bus_bw = 76800,
+};
+
+static const struct msm_mdss_data sm8350_data = {
+	.ubwc_enc_version = UBWC_4_0,
+	.ubwc_dec_version = UBWC_4_0,
+	.ubwc_swizzle = 6,
+	.ubwc_static = 1,
+	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
+	.highest_bank_bit = 3,
+	.macrotile_mode = 1,
+	.reg_bus_bw = 74000,
 };
 
 static const struct msm_mdss_data sm8550_data = {
@@ -604,6 +633,7 @@ static const struct msm_mdss_data sm8550_data = {
 	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
 	.highest_bank_bit = 3,
 	.macrotile_mode = 1,
+	.reg_bus_bw = 57000,
 };
 
 static const struct of_device_id mdss_dt_match[] = {
 	{ .compatible = "qcom,mdss" },
@@ -620,8 +650,8 @@ static const struct of_device_id mdss_dt_match[] = {
 	{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
 	{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
 	{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
-	{ .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
-	{ .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
+	{ .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
+	{ .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
 	{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
 	{}
 };
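
One detail worth calling out in the msm_mdss.c hunks: switching the MDP paths from of_icc_get() to devm_of_icc_get() is what lets the patch delete msm_mdss_put_icc_path() and the devm_add_action_or_reset() call, since devres now drops the handles when the device unbinds. A hedged sketch of the pattern in isolation (hypothetical probe helper, not the driver's code):

#include <linux/device.h>
#include <linux/interconnect.h>

/* Device-managed ICC lookup: the handle is released automatically on
 * driver detach, so no icc_put() or cleanup action is required. */
static int example_get_mdp_path(struct device *dev, struct icc_path **out)
{
	struct icc_path *path = devm_of_icc_get(dev, "mdp0-mem");

	if (IS_ERR(path))
		return PTR_ERR(path);	/* may be -EPROBE_DEFER */

	*out = path;			/* may be NULL if ICC is disabled */
	return 0;
}

Note the asymmetry the diff leaves behind: the new "cpu-cfg" reg-bus path is still taken with plain of_icc_get(), so its handle is not devres-managed.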

drivers/gpu/drm/msm/msm_mdss.h

@@ -14,6 +14,7 @@ struct msm_mdss_data {
 	u32 ubwc_static;
 	u32 highest_bank_bit;
 	u32 macrotile_mode;
+	u32 reg_bus_bw;
 };
 
 #define UBWC_1_0 0x10000000

drivers/gpu/drm/nouveau/nvkm/core/firmware.c

@@ -187,7 +187,8 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
 		break;
 	case NVKM_FIRMWARE_IMG_DMA:
 		nvkm_memory_unref(&memory);
-		dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
+		dma_free_noncoherent(fw->device->dev, sg_dma_len(&fw->mem.sgl),
+				     fw->img, fw->phys, DMA_TO_DEVICE);
 		break;
 	default:
 		WARN_ON(1);
@@ -212,10 +213,12 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
 		break;
 	case NVKM_FIRMWARE_IMG_DMA: {
 		dma_addr_t addr;
-
 		len = ALIGN(fw->len, PAGE_SIZE);
 
-		fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL);
+		fw->img = dma_alloc_noncoherent(fw->device->dev,
+						len, &addr,
+						DMA_TO_DEVICE,
+						GFP_KERNEL);
 		if (fw->img) {
 			memcpy(fw->img, src, fw->len);
 			fw->phys = addr;

drivers/gpu/drm/nouveau/nvkm/falcon/fw.c

@@ -89,6 +89,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
 			nvkm_falcon_fw_dtor_sigs(fw);
 	}
 
+	/* after last write to the img, sync dma mappings */
+	dma_sync_single_for_device(fw->fw.device->dev,
+				   fw->fw.phys,
+				   sg_dma_len(&fw->fw.mem.sgl),
+				   DMA_TO_DEVICE);
+
 	FLCNFW_DBG(fw, "resetting");
 	fw->func->reset(fw);
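
The two nouveau hunks are a matched pair: once the firmware image moves from dma_alloc_coherent() to dma_alloc_noncoherent(..., DMA_TO_DEVICE, ...), CPU writes to it may linger in caches, so the boot path has to call dma_sync_single_for_device() after the last write and before the device fetches the image. A hedged sketch of that contract (illustrative helper, error handling trimmed):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Allocate a CPU-filled, device-read buffer with the noncoherent API:
 * fill it, then explicitly push the writes out before starting DMA. */
static void *example_load_image(struct device *dev, const void *src,
				size_t len, dma_addr_t *addr)
{
	void *img = dma_alloc_noncoherent(dev, len, addr, DMA_TO_DEVICE,
					  GFP_KERNEL);

	if (!img)
		return NULL;

	memcpy(img, src, len);				/* CPU-side writes */
	dma_sync_single_for_device(dev, *addr, len,	/* flush for the device */
				   DMA_TO_DEVICE);
	return img;
}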

drivers/gpu/drm/panel/panel-novatek-nt36523.c

@@ -935,8 +935,7 @@ static int j606f_boe_init_sequence(struct panel_info *pinfo)
 
 static const struct drm_display_mode elish_boe_modes[] = {
 	{
-		/* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
-		.clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 104 / 1000,
+		.clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 120 / 1000,
 		.hdisplay = 1600,
 		.hsync_start = 1600 + 60,
 		.hsync_end = 1600 + 60 + 8,
@@ -950,8 +949,7 @@ static const struct drm_display_mode elish_boe_modes[] = {
 
 static const struct drm_display_mode elish_csot_modes[] = {
 	{
-		/* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
-		.clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 104 / 1000,
+		.clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 120 / 1000,
 		.hdisplay = 1600,
 		.hsync_start = 1600 + 200,
 		.hsync_end = 1600 + 200 + 40,

drivers/gpu/drm/rockchip/rockchip_drm_vop2.c

@@ -1260,6 +1260,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
 		vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_270, rotate_270);
 		vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_90, rotate_90);
 	} else {
+		if (vop2_cluster_window(win)) {
+			vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0);
+			vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0);
+		}
+
 		vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4));
 	}

drivers/gpu/drm/tegra/gem.c

@@ -177,7 +177,7 @@ static void tegra_bo_unpin(struct host1x_bo_mapping *map)
 static void *tegra_bo_mmap(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-	struct iosys_map map;
+	struct iosys_map map = { 0 };
 	int ret;
 
 	if (obj->vaddr) {

drivers/hid/wacom_wac.c

@@ -1924,12 +1924,14 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
 	int fmax = field->logical_maximum;
 	unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
 	int resolution_code = code;
-	int resolution = hidinput_calc_abs_res(field, resolution_code);
+	int resolution;
 
 	if (equivalent_usage == HID_DG_TWIST) {
 		resolution_code = ABS_RZ;
 	}
 
+	resolution = hidinput_calc_abs_res(field, resolution_code);
+
 	if (equivalent_usage == HID_GD_X) {
 		fmin += features->offset_left;
 		fmax -= features->offset_right;

drivers/hwmon/ltc2992.c

@@ -875,9 +875,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
 		}
 
 		ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
-		if (!ret)
+		if (!ret) {
+			if (!val) {
+				fwnode_handle_put(child);
+				return dev_err_probe(&st->client->dev, -EINVAL,
+						     "shunt resistor value cannot be zero\n");
+			}
 			st->r_sense_uohm[addr] = val;
+		}
 	}
 
 	return 0;
 }

drivers/hwmon/pc87360.c

@@ -323,7 +323,11 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
 		}
 
 		/* Voltages */
-		for (i = 0; i < data->innr; i++) {
+		/*
+		 * The min() below does not have any practical meaning and is
+		 * only needed to silence a warning observed with gcc 12+.
+		 */
+		for (i = 0; i < min(data->innr, ARRAY_SIZE(data->in)); i++) {
 			data->in_status[i] = pc87360_read_value(data, LD_IN, i,
 					     PC87365_REG_IN_STATUS);
 			/* Clear bits */

drivers/i2c/busses/i2c-qcom-geni.c

@@ -987,8 +987,10 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
 		return ret;
 
 	ret = clk_prepare_enable(gi2c->core_clk);
-	if (ret)
+	if (ret) {
+		geni_icc_disable(&gi2c->se);
 		return ret;
+	}
 
 	ret = geni_se_resources_on(&gi2c->se);
 	if (ret) {

drivers/i2c/busses/i2c-riic.c

@@ -313,7 +313,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
 	 * frequency with only 62 clock ticks max (31 high, 31 low).
 	 * Aim for a duty of 60% LOW, 40% HIGH.
 	 */
-	total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
+	total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
 
 	for (cks = 0; cks < 7; cks++) {
 		/*

drivers/i2c/busses/i2c-stm32f7.c

@@ -357,6 +357,7 @@ struct stm32f7_i2c_dev {
 	u32 dnf_dt;
 	u32 dnf;
 	struct stm32f7_i2c_alert *alert;
+	bool atomic;
 };
 
 /*
@@ -915,7 +916,8 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
 
 	/* Configure DMA or enable RX/TX interrupt */
 	i2c_dev->use_dma = false;
-	if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN) {
+	if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN
+	    && !i2c_dev->atomic) {
 		ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma,
 					      msg->flags & I2C_M_RD,
 					      f7_msg->count, f7_msg->buf,
@@ -939,6 +941,9 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
 			cr1 |= STM32F7_I2C_CR1_TXDMAEN;
 	}
 
+	if (i2c_dev->atomic)
+		cr1 &= ~STM32F7_I2C_ALL_IRQ_MASK; /* Disable all interrupts */
+
 	/* Configure Start/Repeated Start */
 	cr2 |= STM32F7_I2C_CR2_START;
@@ -1673,7 +1678,22 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int stm32f7_i2c_wait_polling(struct stm32f7_i2c_dev *i2c_dev)
+{
+	ktime_t timeout = ktime_add_ms(ktime_get(), i2c_dev->adap.timeout);
+
+	while (ktime_compare(ktime_get(), timeout) < 0) {
+		udelay(5);
+		stm32f7_i2c_isr_event(0, i2c_dev);
+
+		if (completion_done(&i2c_dev->complete))
+			return 1;
+	}
+
+	return 0;
+}
+
+static int stm32f7_i2c_xfer_core(struct i2c_adapter *i2c_adap,
 			    struct i2c_msg msgs[], int num)
 {
 	struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
@@ -1697,8 +1717,12 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
 
 	stm32f7_i2c_xfer_msg(i2c_dev, msgs);
 
-	time_left = wait_for_completion_timeout(&i2c_dev->complete,
-						i2c_dev->adap.timeout);
+	if (!i2c_dev->atomic)
+		time_left = wait_for_completion_timeout(&i2c_dev->complete,
+							i2c_dev->adap.timeout);
+	else
+		time_left = stm32f7_i2c_wait_polling(i2c_dev);
 
 	ret = f7_msg->result;
 	if (ret) {
 		if (i2c_dev->use_dma)
@@ -1730,6 +1754,24 @@ pm_free:
 	return (ret < 0) ? ret : num;
 }
 
+static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+			    struct i2c_msg msgs[], int num)
+{
+	struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+
+	i2c_dev->atomic = false;
+	return stm32f7_i2c_xfer_core(i2c_adap, msgs, num);
+}
+
+static int stm32f7_i2c_xfer_atomic(struct i2c_adapter *i2c_adap,
+				   struct i2c_msg msgs[], int num)
+{
+	struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+
+	i2c_dev->atomic = true;
+	return stm32f7_i2c_xfer_core(i2c_adap, msgs, num);
+}
+
 static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
 				  unsigned short flags, char read_write,
 				  u8 command, int size,
@@ -2098,6 +2140,7 @@ static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
 
 static const struct i2c_algorithm stm32f7_i2c_algo = {
 	.master_xfer = stm32f7_i2c_xfer,
+	.master_xfer_atomic = stm32f7_i2c_xfer_atomic,
 	.smbus_xfer = stm32f7_i2c_smbus_xfer,
 	.functionality = stm32f7_i2c_func,
 	.reg_slave = stm32f7_i2c_reg_slave,
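
The shape of the stm32f7 change is the standard recipe for i2c master_xfer_atomic support, used for late transfers such as poking a PMIC's power-off register with interrupts disabled: skip DMA, mask the controller's interrupts, and drive the existing event handler by polling until the completion fires or the adapter timeout passes. A hedged sketch of the polling core (placeholder names, not the driver's symbols):

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

/* Poll a transfer to completion in atomic context: call the event ISR
 * by hand every few microseconds instead of waiting for an interrupt. */
static int example_wait_polling(struct completion *done,
				irqreturn_t (*event_isr)(int, void *),
				void *ctx, unsigned int timeout_ms)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), timeout_ms);

	while (ktime_compare(ktime_get(), deadline) < 0) {
		udelay(5);		/* no sleeping allowed here */
		event_isr(0, ctx);	/* service the hardware state machine */
		if (completion_done(done))
			return 1;	/* nonzero, like wait_for_completion_timeout() */
	}

	return 0;			/* timed out */
}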

drivers/i2c/busses/i2c-tegra.c

@@ -1804,9 +1804,9 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 	 * domain.
 	 *
 	 * VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't
-	 * be used for atomic transfers.
+	 * be used for atomic transfers. ACPI device is not IRQ safe also.
 	 */
-	if (!IS_VI(i2c_dev))
+	if (!IS_VI(i2c_dev) && !has_acpi_companion(i2c_dev->dev))
 		pm_runtime_irq_safe(i2c_dev->dev);
 
 	pm_runtime_enable(i2c_dev->dev);

drivers/i3c/master/mipi-i3c-hci/dma.c

@@ -345,6 +345,8 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
 
 	for (i = 0; i < n; i++) {
 		xfer = xfer_list + i;
+		if (!xfer->data)
+			continue;
 		dma_unmap_single(&hci->master.dev,
 				 xfer->data_dma, xfer->data_len,
 				 xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
@@ -450,10 +452,9 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
 		/*
 		 * We're deep in it if ever this condition is ever met.
 		 * Hardware might still be writing to memory, etc.
-		 * Better suspend the world than risking silent corruption.
 		 */
 		dev_crit(&hci->master.dev, "unable to abort the ring\n");
-		BUG();
+		WARN_ON(1);
 	}
 
 	for (i = 0; i < n; i++) {

drivers/infiniband/hw/hfi1/chip.c

@@ -13185,15 +13185,16 @@ static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
 {
 	u64 reg;
 	u16 idx = src / BITS_PER_REGISTER;
+	unsigned long flags;
 
-	spin_lock(&dd->irq_src_lock);
+	spin_lock_irqsave(&dd->irq_src_lock, flags);
 	reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
 	if (set)
 		reg |= bits;
 	else
 		reg &= ~bits;
 	write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
-	spin_unlock(&dd->irq_src_lock);
+	spin_unlock_irqrestore(&dd->irq_src_lock, flags);
 }
 
 /**

drivers/infiniband/ulp/rtrs/rtrs.c

@@ -255,7 +255,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
 static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
 		     u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
 {
-	struct ib_qp_init_attr init_attr = {NULL};
+	struct ib_qp_init_attr init_attr = {};
 	struct rdma_cm_id *cm_id = con->cm_id;
 	int ret;

drivers/input/input-mt.c

@@ -46,6 +46,9 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
 		return 0;
 	if (mt)
 		return mt->num_slots != num_slots ? -EINVAL : 0;
+	/* Arbitrary limit for avoiding too large memory allocation. */
+	if (num_slots > 1024)
+		return -EINVAL;
 
 	mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
 	if (!mt)
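
The input-mt hunk is a small hardening pattern: num_slots feeds struct_size() and thus the size of a kzalloc(), and it ultimately comes from device or user input, so it gets an explicit ceiling before the allocation. A hedged sketch of the same bound-then-allocate shape (hypothetical cap and helper, not the subsystem's API):

#include <linux/input/mt.h>
#include <linux/overflow.h>
#include <linux/slab.h>

#define EXAMPLE_MAX_SLOTS 1024	/* arbitrary ceiling, as in the hunk above */

/* Reject absurd slot counts before they size an allocation. */
static struct input_mt *example_alloc_mt(unsigned int num_slots)
{
	struct input_mt *mt;

	if (!num_slots || num_slots > EXAMPLE_MAX_SLOTS)
		return NULL;

	mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
	return mt;
}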

Some files were not shown because too many files have changed in this diff.