Merge branch 'linux-6.6.y'

commit 8dcb1461d4
Author: Jianping Liu
Date:   2024-03-04 10:25:33 +08:00

299 changed files with 3609 additions and 1714 deletions


@ -383,6 +383,12 @@ latex_elements = {
verbatimhintsturnover=false,
''',
#
# Some of our authors are fond of deep nesting; tell latex to
# cope.
#
'maxlistdepth': '10',
# For CJK One-half spacing, need to be in front of hyperref
'extrapackages': r'\usepackage{setspace}',


@ -8,7 +8,7 @@ else
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 18
SUBLEVEL = 19
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@ -45,8 +45,8 @@
num-chipselects = <1>;
cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
tpmdev@0 {
compatible = "tcg,tpm_tis-spi";
tpm@0 {
compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
spi-max-frequency = <33000000>;
reg = <0>;
};


@ -80,8 +80,8 @@
gpio-miso = <&gpio ASPEED_GPIO(R, 5) GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
tpmdev@0 {
compatible = "tcg,tpm_tis-spi";
tpm@0 {
compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
spi-max-frequency = <33000000>;
reg = <0>;
};


@ -456,7 +456,7 @@
status = "okay";
tpm: tpm@2e {
compatible = "tcg,tpm-tis-i2c";
compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c";
reg = <0x2e>;
};
};


@ -35,8 +35,8 @@
gpio-mosi = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
gpio-miso = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
tpmdev@0 {
compatible = "tcg,tpm_tis-spi";
tpm@0 {
compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
spi-max-frequency = <33000000>;
reg = <0>;
};


@ -121,7 +121,7 @@
tpm_tis: tpm@1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_tpm>;
compatible = "tcg,tpm_tis-spi";
compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
reg = <1>;
spi-max-frequency = <20000000>;
interrupt-parent = <&gpio5>;


@ -130,7 +130,7 @@
* TCG specification - Section 6.4.1 Clocking:
* TPM shall support a SPI clock frequency range of 10-24 MHz.
*/
st33htph: tpm-tis@0 {
st33htph: tpm@0 {
compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
reg = <0>;
spi-max-frequency = <24000000>;


@ -217,7 +217,7 @@
pinctrl-names = "default";
pinctrl-0 = <&spi1_pins>;
tpm_spi_tis@0 {
tpm@0 {
compatible = "tcg,tpm_tis-spi";
reg = <0>;
spi-max-frequency = <500000>;


@ -339,6 +339,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
GPIO_LOOKUP_IDX("G", 0, NULL, 1,
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
{ }
},
};


@ -484,7 +484,7 @@
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart4>;
status = "okay";
status = "disabled";
};
&usb3_phy0 {


@ -168,6 +168,13 @@
enable-active-high;
};
reg_vcc_1v8: regulator-1v8 {
compatible = "regulator-fixed";
regulator-name = "VCC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
reg_vcc_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC_3V3";
@ -464,7 +471,7 @@
clock-names = "mclk";
clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1>;
reset-gpios = <&gpio4 29 GPIO_ACTIVE_LOW>;
iov-supply = <&reg_vcc_3v3>;
iov-supply = <&reg_vcc_1v8>;
ldoin-supply = <&reg_vcc_3v3>;
};


@ -632,6 +632,7 @@
clock-names = "spiclk", "apb_pclk";
dmas = <&dmac 12>, <&dmac 13>;
dma-names = "tx", "rx";
num-cs = <2>;
pinctrl-names = "default";
pinctrl-0 = <&spi0_clk &spi0_csn &spi0_miso &spi0_mosi>;
#address-cells = <1>;
@ -647,6 +648,7 @@
clock-names = "spiclk", "apb_pclk";
dmas = <&dmac 14>, <&dmac 15>;
dma-names = "tx", "rx";
num-cs = <2>;
pinctrl-names = "default";
pinctrl-0 = <&spi1_clk &spi1_csn0 &spi1_csn1 &spi1_miso &spi1_mosi>;
#address-cells = <1>;


@ -163,13 +163,13 @@
&gpio1 {
gpio-line-names = /* GPIO1 A0-A7 */
"HEADER_27_3v3", "HEADER_28_3v3", "", "",
"HEADER_27_3v3", "", "", "",
"HEADER_29_1v8", "", "HEADER_7_1v8", "",
/* GPIO1 B0-B7 */
"", "HEADER_31_1v8", "HEADER_33_1v8", "",
"HEADER_11_1v8", "HEADER_13_1v8", "", "",
/* GPIO1 C0-C7 */
"", "", "", "",
"", "HEADER_28_3v3", "", "",
"", "", "", "",
/* GPIO1 D0-D7 */
"", "", "", "",
@ -193,11 +193,11 @@
&gpio4 {
gpio-line-names = /* GPIO4 A0-A7 */
"", "", "HEADER_37_3v3", "HEADER_32_3v3",
"HEADER_36_3v3", "", "HEADER_35_3v3", "HEADER_38_3v3",
"", "", "HEADER_37_3v3", "HEADER_8_3v3",
"HEADER_10_3v3", "", "HEADER_32_3v3", "HEADER_35_3v3",
/* GPIO4 B0-B7 */
"", "", "", "HEADER_40_3v3",
"HEADER_8_3v3", "HEADER_10_3v3", "", "",
"HEADER_38_3v3", "HEADER_36_3v3", "", "",
/* GPIO4 C0-C7 */
"", "", "", "",
"", "", "", "",


@ -360,6 +360,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
extern unsigned int sme_get_vl(void);
extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
extern void sme_suspend_exit(void);
/*
* Return how many bytes of memory are required to store the full SME
@ -395,6 +396,7 @@ static inline int sme_max_vl(void) { return 0; }
static inline int sme_max_virtualisable_vl(void) { return 0; }
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
static inline void sme_suspend_exit(void) { }
static inline size_t sme_state_size(struct task_struct const *task)
{


@ -1406,6 +1406,22 @@ void __init sme_setup(void)
get_sme_default_vl());
}
void sme_suspend_exit(void)
{
u64 smcr = 0;
if (!system_supports_sme())
return;
if (system_supports_fa64())
smcr |= SMCR_ELx_FA64;
if (system_supports_sme2())
smcr |= SMCR_ELx_EZT0;
write_sysreg_s(smcr, SYS_SMCR_EL1);
write_sysreg_s(0, SYS_SMPRI_EL1);
}
#endif /* CONFIG_ARM64_SME */
static void sve_init_regs(void)


@ -12,6 +12,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
@ -80,6 +81,8 @@ void notrace __cpu_suspend_exit(void)
*/
spectre_v4_enable_mitigation(NULL);
sme_suspend_exit();
/* Restore additional feature-specific configuration */
ptrauth_suspend_exit();
}


@ -462,6 +462,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
}
irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
if (!irq)
continue;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = pendmask & (1U << bit_nr);
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@ -1427,6 +1430,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
for (i = 0; i < irq_count; i++) {
irq = vgic_get_irq(kvm, NULL, intids[i]);
if (!irq)
continue;
update_affinity(irq, vcpu2);


@ -11,6 +11,7 @@ config LOONGARCH
select ARCH_DISABLE_KASAN_INLINE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_FORTIFY_SOURCE
@ -97,6 +98,7 @@ config LOONGARCH
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@ -603,23 +605,6 @@ config RANDOMIZE_BASE_MAX_OFFSET
This is limited by the size of the lower address memory, 256MB.
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
endmenu
config ARCH_SELECT_MEMORY_MODEL
@ -638,10 +623,6 @@ config ARCH_SPARSEMEM_ENABLE
or have huge holes in the physical address space for other reasons.
See <file:Documentation/mm/numa.rst> for more.
config ARCH_ENABLE_THP_MIGRATION
def_bool y
depends on TRANSPARENT_HUGEPAGE
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG


@ -32,8 +32,10 @@ static inline bool acpi_has_cpu_in_madt(void)
return true;
}
#define MAX_CORE_PIC 256
extern struct list_head acpi_wakeup_device_list;
extern struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
extern int __init parse_acpi_topology(void);


@ -29,11 +29,9 @@ int disabled_cpus;
u64 acpi_saved_sp;
#define MAX_CORE_PIC 256
#define PREFIX "ACPI: "
struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{


@ -367,6 +367,8 @@ void __init platform_init(void)
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
early_init_fdt_scan_reserved_mem();
unflatten_and_copy_device_tree();
#ifdef CONFIG_NUMA
@ -400,8 +402,6 @@ static void __init arch_mem_init(char **cmdline_p)
check_kernel_sections_mem();
early_init_fdt_scan_reserved_mem();
/*
* In order to reduce the possibility of kernel panic when failed to
* get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate


@ -88,6 +88,73 @@ void show_ipi_list(struct seq_file *p, int prec)
}
}
static inline void set_cpu_core_map(int cpu)
{
int i;
cpumask_set_cpu(cpu, &cpu_core_setup_map);
for_each_cpu(i, &cpu_core_setup_map) {
if (cpu_data[cpu].package == cpu_data[i].package) {
cpumask_set_cpu(i, &cpu_core_map[cpu]);
cpumask_set_cpu(cpu, &cpu_core_map[i]);
}
}
}
static inline void set_cpu_sibling_map(int cpu)
{
int i;
cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
for_each_cpu(i, &cpu_sibling_setup_map) {
if (cpus_are_siblings(cpu, i)) {
cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
}
}
}
static inline void clear_cpu_sibling_map(int cpu)
{
int i;
for_each_cpu(i, &cpu_sibling_setup_map) {
if (cpus_are_siblings(cpu, i)) {
cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
}
}
cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
}
/*
* Calculate a new cpu_foreign_map mask whenever a
* new cpu appears or disappears.
*/
void calculate_cpu_foreign_map(void)
{
int i, k, core_present;
cpumask_t temp_foreign_map;
/* Re-calculate the mask */
cpumask_clear(&temp_foreign_map);
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)
if (cpus_are_siblings(i, k))
core_present = 1;
if (!core_present)
cpumask_set_cpu(i, &temp_foreign_map);
}
for_each_online_cpu(i)
cpumask_andnot(&cpu_foreign_map[i],
&temp_foreign_map, &cpu_sibling_map[i]);
}
/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
@ -300,6 +367,7 @@ int loongson_cpu_disable(void)
numa_remove_cpu(cpu);
#endif
set_cpu_online(cpu, false);
clear_cpu_sibling_map(cpu);
calculate_cpu_foreign_map();
local_irq_save(flags);
irq_migrate_all_off_this_cpu();
@ -334,6 +402,7 @@ void __noreturn arch_cpu_idle_dead(void)
addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
} while (addr == 0);
local_irq_disable();
init_fn = (void *)TO_CACHE(addr);
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
@ -376,59 +445,6 @@ static int __init ipi_pm_init(void)
core_initcall(ipi_pm_init);
#endif
static inline void set_cpu_sibling_map(int cpu)
{
int i;
cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
for_each_cpu(i, &cpu_sibling_setup_map) {
if (cpus_are_siblings(cpu, i)) {
cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
}
}
}
static inline void set_cpu_core_map(int cpu)
{
int i;
cpumask_set_cpu(cpu, &cpu_core_setup_map);
for_each_cpu(i, &cpu_core_setup_map) {
if (cpu_data[cpu].package == cpu_data[i].package) {
cpumask_set_cpu(i, &cpu_core_map[cpu]);
cpumask_set_cpu(cpu, &cpu_core_map[i]);
}
}
}
/*
* Calculate a new cpu_foreign_map mask whenever a
* new cpu appears or disappears.
*/
void calculate_cpu_foreign_map(void)
{
int i, k, core_present;
cpumask_t temp_foreign_map;
/* Re-calculate the mask */
cpumask_clear(&temp_foreign_map);
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)
if (cpus_are_siblings(i, k))
core_present = 1;
if (!core_present)
cpumask_set_cpu(i, &temp_foreign_map);
}
for_each_online_cpu(i)
cpumask_andnot(&cpu_foreign_map[i],
&temp_foreign_map, &cpu_sibling_map[i]);
}
/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{


@ -2,6 +2,7 @@
# Objects to go into the VDSO.
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# Include the generic Makefile to check the built vdso.


@ -2007,6 +2007,12 @@ unsigned long vi_handlers[64];
void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
/*
* Reserving exception space on CPUs other than CPU0
* is too late, since memblock is no longer available
* by the time the APs come up.
*/
if (smp_processor_id() == 0)
memblock_reserve(addr, size);
}


@ -172,7 +172,6 @@ static int __init processor_probe(struct parisc_device *dev)
p->cpu_num = cpu_info.cpu_num;
p->cpu_loc = cpu_info.cpu_loc;
set_cpu_possible(cpuid, true);
store_cpu_topology(cpuid);
#ifdef CONFIG_SMP
@ -474,13 +473,6 @@ static struct parisc_driver cpu_driver __refdata = {
*/
void __init processor_init(void)
{
unsigned int cpu;
reset_cpu_topology();
/* reset possible mask. We will mark those which are possible. */
for_each_possible_cpu(cpu)
set_cpu_possible(cpu, false);
register_parisc_driver(&cpu_driver);
}


@ -228,10 +228,8 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
#ifdef CONFIG_IRQSTACKS
extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */
void *ptr;
ptr = dereference_kernel_function_descriptor(&handle_interruption);
if (pc_is_kernel_fn(pc, ptr)) {
if (pc_is_kernel_fn(pc, handle_interruption)) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
@ -239,13 +237,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
return 1;
}
if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
pc_is_kernel_fn(pc, syscall_exit)) {
if (pc == (unsigned long)&ret_from_kernel_thread ||
pc == (unsigned long)&syscall_exit) {
info->prev_sp = info->prev_ip = 0;
return 1;
}
if (pc_is_kernel_fn(pc, intr_return)) {
if (pc == (unsigned long)&intr_return) {
struct pt_regs *regs;
dbg("Found intr_return()\n");
@ -257,14 +255,14 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
}
if (pc_is_kernel_fn(pc, _switch_to) ||
pc_is_kernel_fn(pc, _switch_to_ret)) {
pc == (unsigned long)&_switch_to_ret) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
return 1;
}
#ifdef CONFIG_IRQSTACKS
if (pc_is_kernel_fn(pc, _call_on_stack)) {
if (pc == (unsigned long)&_call_on_stack) {
info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;


@ -30,6 +30,16 @@ void *pci_traverse_device_nodes(struct device_node *start,
void *data);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
#if defined(CONFIG_IOMMU_API) && (defined(CONFIG_PPC_PSERIES) || \
defined(CONFIG_PPC_POWERNV))
extern void ppc_iommu_register_device(struct pci_controller *phb);
extern void ppc_iommu_unregister_device(struct pci_controller *phb);
#else
static inline void ppc_iommu_register_device(struct pci_controller *phb) { }
static inline void ppc_iommu_unregister_device(struct pci_controller *phb) { }
#endif
/* From rtas_pci.h */
extern void init_pci_config_tokens (void);
extern unsigned long get_phb_buid (struct device_node *);


@ -1344,7 +1344,7 @@ static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
struct pci_controller *hose;
if (!dev_is_pci(dev))
return ERR_PTR(-EPERM);
return ERR_PTR(-ENODEV);
pdev = to_pci_dev(dev);
hose = pdev->bus->sysdata;
@ -1393,6 +1393,21 @@ static const struct attribute_group *spapr_tce_iommu_groups[] = {
NULL,
};
void ppc_iommu_register_device(struct pci_controller *phb)
{
iommu_device_sysfs_add(&phb->iommu, phb->parent,
spapr_tce_iommu_groups, "iommu-phb%04x",
phb->global_number);
iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
phb->parent);
}
void ppc_iommu_unregister_device(struct pci_controller *phb)
{
iommu_device_unregister(&phb->iommu);
iommu_device_sysfs_remove(&phb->iommu);
}
/*
* This registers IOMMU devices of PHBs. This needs to happen
* after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
@ -1403,11 +1418,7 @@ static int __init spapr_tce_setup_phb_iommus_initcall(void)
struct pci_controller *hose;
list_for_each_entry(hose, &hose_list, list_node) {
iommu_device_sysfs_add(&hose->iommu, hose->parent,
spapr_tce_iommu_groups, "iommu-phb%04x",
hose->global_number);
iommu_device_register(&hose->iommu, &spapr_tce_iommu_ops,
hose->parent);
ppc_iommu_register_device(hose);
}
return 0;
}


@ -35,6 +35,8 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
pseries_msi_allocate_domains(phb);
ppc_iommu_register_device(phb);
/* Create EEH devices for the PHB */
eeh_phb_pe_create(phb);
@ -76,6 +78,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
}
}
ppc_iommu_unregister_device(phb);
pseries_msi_free_domains(phb);
/* Keep a reference so phb isn't freed yet */


@ -241,7 +241,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
zpci_memcpy_toio(to, from, count);
zpci_memcpy_toio(to, from, count * 8);
}
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
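
The bug above is a unit mismatch: the __iowrite64_copy() interface takes 'count' in 64-bit words, while zpci_memcpy_toio() expects a byte length, so the wrapper must scale by 8. A minimal sketch of the corrected contract, with stand-in names (toio_bytes() is hypothetical, not the real zPCI primitive):

#include <stddef.h>
#include <string.h>

/* Stand-in for zpci_memcpy_toio(), which takes its length in bytes. */
static void toio_bytes(void *to, const void *from, size_t bytes)
{
        memcpy(to, from, bytes);
}

/* Per the __iowrite64_copy() contract, 'count' is in 64-bit words. */
static void iowrite64_copy_sketch(void *to, const void *from, size_t count)
{
        toio_bytes(to, from, count * 8);        /* scale words to bytes */
}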


@ -60,7 +60,7 @@ libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
drivers-$(CONFIG_PM) += arch/sparc/power/
drivers-$(CONFIG_FB) += arch/sparc/video/
drivers-$(CONFIG_FB_CORE) += arch/sparc/video/
boot := arch/sparc/boot


@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_FB) += fbdev.o
obj-$(CONFIG_FB_CORE) += fbdev.o


@ -6,6 +6,9 @@
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/segment.h>
#include <asm/cache.h>
.pushsection .noinstr.text, "ax"
@ -20,3 +23,23 @@ SYM_FUNC_END(entry_ibpb)
EXPORT_SYMBOL_GPL(entry_ibpb);
.popsection
/*
* Define the VERW operand that is disguised as entry code so that
* it can be referenced with KPTI enabled. This ensures VERW can be
* used late in exit-to-user path after page tables are switched.
*/
.pushsection .entry.text, "ax"
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_START_NOALIGN(mds_verw_sel)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
.word __KERNEL_DS
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_END(mds_verw_sel);
/* For KVM */
EXPORT_SYMBOL_GPL(mds_verw_sel);
.popsection
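
The exported mds_verw_sel symbol above gives C code a memory operand for VERW that stays mapped even with KPTI. A hedged sketch of how a caller could issue it (illustrative inline asm, not the kernel's actual CLEAR_CPU_BUFFERS plumbing, which is defined as an ALTERNATIVE further below):

#include <stdint.h>

extern const uint16_t mds_verw_sel;     /* the selector defined above */

static inline void clear_cpu_buffers_sketch(void)
{
        /* Only the memory-operand form of VERW clears the CPU buffers,
         * and VERW clobbers CFLAGS.ZF, hence the "cc" clobber. */
        __asm__ volatile("verw %0" : : "m"(mds_verw_sel) : "cc");
}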


@ -97,7 +97,7 @@
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
/* FREE, was #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */


@ -329,6 +329,17 @@
#endif
.endm
/*
* Macro to execute the VERW instruction that mitigates transient data sampling
* attacks such as MDS. On affected systems a microcode update overloaded the VERW
* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
*
* Note: Only the memory operand variant of VERW clears the CPU buffers.
*/
.macro CLEAR_CPU_BUFFERS
ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
.endm
#else /* __ASSEMBLY__ */
#define ANNOTATE_RETPOLINE_SAFE \
@ -545,6 +556,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
extern u16 mds_verw_sel;
#include <asm/segment.h>
/**


@ -961,7 +961,7 @@ static int __init cmp_memblk(const void *a, const void *b)
const struct numa_memblk *ma = *(const struct numa_memblk **)a;
const struct numa_memblk *mb = *(const struct numa_memblk **)b;
return ma->start - mb->start;
return (ma->start > mb->start) - (ma->start < mb->start);
}
static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
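
The old comparator returned ma->start - mb->start, a u64 difference truncated to int, so two blocks whose start addresses differ only above bit 31 could compare as equal or with the wrong sign, corrupting the sort order. A standalone user-space demo of the failure and of the (a > b) - (a < b) idiom used in the fix:

#include <stdint.h>
#include <stdio.h>

static int cmp_broken(uint64_t a, uint64_t b)
{
        return (int)(a - b);            /* truncated: sign can be wrong */
}

static int cmp_fixed(uint64_t a, uint64_t b)
{
        return (a > b) - (a < b);       /* always exactly -1, 0, or 1 */
}

int main(void)
{
        uint64_t a = 1ULL << 32, b = 0; /* differ only above bit 31 */
        printf("broken=%d fixed=%d\n", cmp_broken(a, b), cmp_fixed(a, b));
        return 0;                       /* prints "broken=0 fixed=1" */
}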
@ -971,14 +971,12 @@ static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
* @start: address to begin fill
* @end: address to end fill
*
* Find and extend numa_meminfo memblks to cover the @start-@end
* physical address range, such that the first memblk includes
* @start, the last memblk includes @end, and any gaps in between
* are filled.
* Find and extend numa_meminfo memblks to cover the physical
* address range @start-@end
*
* RETURNS:
* 0 : Success
* NUMA_NO_MEMBLK : No memblk exists in @start-@end range
* NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
*/
int __init numa_fill_memblks(u64 start, u64 end)
@ -990,17 +988,14 @@ int __init numa_fill_memblks(u64 start, u64 end)
/*
* Create a list of pointers to numa_meminfo memblks that
* overlap start, end. Exclude (start == bi->end) since
* end addresses in both a CFMWS range and a memblk range
* are exclusive.
*
* This list of pointers is used to make in-place changes
* that fill out the numa_meminfo memblks.
* overlap start, end. The list is used to make in-place
* changes that fill out the numa_meminfo memblks.
*/
for (int i = 0; i < mi->nr_blks; i++) {
struct numa_memblk *bi = &mi->blk[i];
if (start < bi->end && end >= bi->start) {
if (memblock_addrs_overlap(start, end - start, bi->start,
bi->end - bi->start)) {
blk[count] = &mi->blk[i];
count++;
}
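
The replaced test start < bi->end && end >= bi->start mixed an exclusive bound on one side with an inclusive one on the other; memblock_addrs_overlap() treats both ranges as consistently half-open. Its predicate reduces to the standard interval-overlap check, sketched here:

#include <stdbool.h>
#include <stdint.h>

/* Two half-open ranges [base1, base1+size1) and [base2, base2+size2)
 * overlap iff each one starts before the other one ends. */
static bool addrs_overlap(uint64_t base1, uint64_t size1,
                          uint64_t base2, uint64_t size2)
{
        return base1 < (base2 + size2) && base2 < (base1 + size1);
}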


@ -205,12 +205,19 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
/*
* success
*/
if ((iov_iter_rw(iter) == WRITE &&
(!map_data || !map_data->null_mapped)) ||
(map_data && map_data->from_user)) {
if (iov_iter_rw(iter) == WRITE &&
(!map_data || !map_data->null_mapped)) {
ret = bio_copy_from_iter(bio, iter);
if (ret)
goto cleanup;
} else if (map_data && map_data->from_user) {
struct iov_iter iter2 = *iter;
/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
iter2.data_source = ITER_SOURCE;
ret = bio_copy_from_iter(bio, &iter2);
if (ret)
goto cleanup;
} else {
if (bmd->is_our_pages)
zero_fill_bio(bio);


@ -467,8 +467,7 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
/* VPU 37XX does not require 10m D3hot delay */
if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
/* NPU does not require 10m D3hot delay */
pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);


@ -562,7 +562,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);


@ -24,7 +24,7 @@
#define SKU_HW_ID_SHIFT 16u
#define SKU_HW_ID_MASK 0xffff0000u
#define PLL_CONFIG_DEFAULT 0x1
#define PLL_CONFIG_DEFAULT 0x0
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ (50 * 1000000)
@ -523,7 +523,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@ -697,7 +697,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 tile_disable;
u32 tile_enable;
u32 fuse;
fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
@ -718,10 +717,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
else
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
tile_enable = (~tile_disable) & TILE_MAX_MASK;
hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
hw->tile_fuse = tile_disable;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;


@ -491,7 +491,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->cmdq.cons = 0;
memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
@ -805,8 +804,6 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);


@ -48,6 +48,7 @@ enum {
enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_43bit_dma,
board_ahci_ign_iferr,
board_ahci_low_power,
board_ahci_no_debounce_delay,
@ -128,6 +129,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_43bit_dma] = {
AHCI_HFLAGS (AHCI_HFLAG_43BIT_ONLY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_ign_iferr] = {
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
@ -596,14 +604,19 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
/* ASMedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
{ PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */
{ PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */
{ PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */
{ PCI_VDEVICE(ASMEDIA, 0x1165), board_ahci }, /* ASM1165 */
{ PCI_VDEVICE(ASMEDIA, 0x1166), board_ahci }, /* ASM1166 */
/*
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
@ -657,6 +670,11 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
dev_info(&pdev->dev, "ASM1166 has only six ports\n");
hpriv->saved_port_map = 0x3f;
}
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->saved_port_map = 1;
@ -943,11 +961,20 @@ static int ahci_pci_device_resume(struct device *dev)
#endif /* CONFIG_PM */
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
static int ahci_configure_dma_masks(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
const int dma_bits = using_dac ? 64 : 32;
int dma_bits;
int rc;
if (hpriv->cap & HOST_CAP_64) {
dma_bits = 64;
if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
dma_bits = 43;
} else {
dma_bits = 32;
}
/*
* If the device fixup already set the dma_mask to some non-standard
* value, don't extend it here. This happens on STA2X11, for example.
@ -1920,7 +1947,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_gtf_filter_workaround(host);
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
return rc;
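
The rewritten helper derives the DMA mask width from controller capabilities: 64 bits when HOST_CAP_64 is set, clamped to 43 bits when AHCI_HFLAG_43BIT_ONLY applies (the ASMedia parts above), and 32 bits otherwise. A sketch of how the chosen width becomes a mask, using the kernel's DMA_BIT_MASK() definition (the helper name is hypothetical):

#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static uint64_t ahci_dma_mask_sketch(int host_cap_64, int only_43bit)
{
        int dma_bits = 32;

        if (host_cap_64)
                dma_bits = only_43bit ? 43 : 64;
        return DMA_BIT_MASK(dma_bits);  /* 43 -> 0x7ffffffffff */
}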


@ -247,6 +247,7 @@ enum {
AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
suspend/resume */
AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
/* ap->flags bits */


@ -88,7 +88,6 @@ struct ceva_ahci_priv {
u32 axicc;
bool is_cci_enabled;
int flags;
struct reset_control *rst;
};
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
{
int rc, i;
rc = ahci_platform_enable_regulators(hpriv);
if (rc)
return rc;
rc = ahci_platform_enable_clks(hpriv);
if (rc)
goto disable_regulator;
/* Assert the controller reset */
rc = ahci_platform_assert_rsts(hpriv);
if (rc)
goto disable_clks;
for (i = 0; i < hpriv->nports; i++) {
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_rsts;
}
/* De-assert the controller reset */
ahci_platform_deassert_rsts(hpriv);
for (i = 0; i < hpriv->nports; i++) {
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
goto disable_phys;
}
}
return 0;
disable_rsts:
ahci_platform_deassert_rsts(hpriv);
disable_phys:
while (--i >= 0) {
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
disable_clks:
ahci_platform_disable_clks(hpriv);
disable_regulator:
ahci_platform_disable_regulators(hpriv);
return rc;
}
static int ceva_ahci_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
return -ENOMEM;
cevapriv->ahci_pdev = pdev;
cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
NULL);
if (IS_ERR(cevapriv->rst))
dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
"failed to get reset\n");
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
if (!cevapriv->rst) {
rc = ahci_platform_enable_resources(hpriv);
hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
NULL);
if (IS_ERR(hpriv->rsts))
return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
"failed to get reset\n");
rc = ceva_ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
} else {
int i;
rc = ahci_platform_enable_clks(hpriv);
if (rc)
return rc;
/* Assert the controller reset */
reset_control_assert(cevapriv->rst);
for (i = 0; i < hpriv->nports; i++) {
rc = phy_init(hpriv->phys[i]);
if (rc)
return rc;
}
/* De-assert the controller reset */
reset_control_deassert(cevapriv->rst);
for (i = 0; i < hpriv->nports; i++) {
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
return rc;
}
}
}
if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
(u8 *)&cevapriv->pp2c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
return -EINVAL;
rc = -EINVAL;
goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
(u8 *)&cevapriv->pp2c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
return -EINVAL;
rc = -EINVAL;
goto disable_resources;
}
/* Read OOB timing value for COMWAKE from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
(u8 *)&cevapriv->pp3c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
return -EINVAL;
rc = -EINVAL;
goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
(u8 *)&cevapriv->pp3c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
return -EINVAL;
rc = -EINVAL;
goto disable_resources;
}
/* Read phy BURST timing value from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-burst-params",
(u8 *)&cevapriv->pp4c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-burst-params property not defined\n");
return -EINVAL;
rc = -EINVAL;
goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-burst-params",
(u8 *)&cevapriv->pp4c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-burst-params property not defined\n");
return -EINVAL;
rc = -EINVAL;
goto disable_resources;
}
/* Read phy RETRY interval timing value from device-tree */
if (of_property_read_u16_array(np, "ceva,p0-retry-params",
(u16 *)&cevapriv->pp5c[0], 2) < 0) {
dev_warn(dev, "ceva,p0-retry-params property not defined\n");
return -EINVAL;
rc = -EINVAL;
goto disable_resources;
}
if (of_property_read_u16_array(np, "ceva,p1-retry-params",
(u16 *)&cevapriv->pp5c[1], 2) < 0) {
dev_warn(dev, "ceva,p1-retry-params property not defined\n");
return -EINVAL;
rc = -EINVAL;
goto disable_resources;
}
/*
@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_enable_resources(hpriv);
rc = ceva_ahci_platform_enable_resources(hpriv);
if (rc)
return rc;


@ -2034,6 +2034,10 @@ void ata_dev_power_set_active(struct ata_device *dev)
struct ata_taskfile tf;
unsigned int err_mask;
/* If the device is already sleeping, do nothing. */
if (dev->flags & ATA_DFLAG_SLEEPING)
return;
/*
* Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
* if supported by the device.


@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
struct gendisk *gd;
mempool_t *mp;
struct blk_mq_tag_set *set;
sector_t ssize;
ulong flags;
int late = 0;
int err;
@ -395,7 +396,7 @@ aoeblk_gdalloc(void *vp)
gd->minors = AOE_PARTITIONS;
gd->fops = &aoe_bdops;
gd->private_data = d;
set_capacity(gd, d->ssize);
ssize = d->ssize;
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
@ -404,6 +405,8 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
set_capacity(gd, ssize);
err = device_add_disk(NULL, gd, aoe_attr_groups);
if (err)
goto out_disk_cleanup;
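
The change above snapshots d->ssize into a local while d->lock is held and defers set_capacity() until after the unlock, so the capacity update never runs under the spinlock (set_capacity() may take other locks). The general pattern, as a hedged pthread sketch:

#include <pthread.h>

struct dev_state {
        pthread_spinlock_t lock;        /* initialised elsewhere */
        unsigned long ssize;
};

static void may_take_other_locks(unsigned long v) { (void)v; }

static void publish_size(struct dev_state *d)
{
        unsigned long ssize;

        pthread_spin_lock(&d->lock);
        ssize = d->ssize;               /* snapshot under the lock */
        pthread_spin_unlock(&d->lock);
        may_take_other_locks(ssize);    /* safe: lock already dropped */
}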


@ -1629,14 +1629,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
/* Ensure no requests in virtqueues before deleting vqs. */
blk_mq_freeze_queue(vblk->disk->queue);
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
blk_mq_quiesce_queue(vblk->disk->queue);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
@ -1654,7 +1655,7 @@ static int virtblk_restore(struct virtio_device *vdev)
virtio_device_ready(vdev);
blk_mq_unquiesce_queue(vblk->disk->queue);
blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
#endif


@ -117,7 +117,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
i++;
}
if (i == 0 || i % 4)
if (i == 0)
goto err;
for (i = 0; i < ARRAY_SIZE(gprvals); i++) {


@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
unsigned long line_size;
unsigned long flags;
if (unlikely(start == end))
return;
line_size = ax45mp_priv.ax45mp_cache_line_size;
start = start & (~(line_size - 1));
end = ((end + line_size - 1) & (~(line_size - 1)));
local_irq_save(flags);
ax45mp_cpu_dcache_wb_range(start, end);
local_irq_restore(flags);
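
Cache maintenance must cover whole cache lines, so the helper rounds the range outward: start aligned down and end aligned up to line_size, a power of two. The new start == end check avoids expanding an empty range into a needless full-line writeback. The mask arithmetic, sketched:

#include <stdint.h>

/* Round [start, end) outward to line_size, which must be a power of two. */
static void align_to_lines(uintptr_t *start, uintptr_t *end,
                           uintptr_t line_size)
{
        *start &= ~(line_size - 1);                       /* align down */
        *end = (*end + line_size - 1) & ~(line_size - 1); /* align up */
}
/* e.g. line_size 64: [0x1005, 0x1041) becomes [0x1000, 0x1080) */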


@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
struct virtio_crypto_ctrl_header *header, void *para,
struct virtio_crypto_ctrl_header *header,
struct virtio_crypto_akcipher_session_para *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
memcpy(&ctrl->u, para, sizeof(ctrl->u));
memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
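
The old code copied sizeof(ctrl->u) bytes out of a void *para that actually points at a much smaller session-para struct, overreading the source; typing the parameter and copying sizeof(*para) bounds the copy to the real object. A reduced demo of the pattern (types hypothetical):

#include <string.h>

struct session_para { unsigned int algo, keylen; };

union ctrl_u {
        struct { struct session_para para; } create_session;
        unsigned char raw[64];
};

static void fill_ctrl(union ctrl_u *u, const struct session_para *para)
{
        /* memcpy(u, para, sizeof(*u)) would read 64 bytes from an
         * 8-byte object; sizing by the source member is bounded. */
        memcpy(&u->create_session.para, para, sizeof(*para));
}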


@ -194,31 +194,27 @@ struct cxl_cfmws_context {
int id;
};
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
struct resource *cxl_res = ctx->cxl_res;
struct cxl_cxims_context cxims_ctx;
struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
struct acpi_cedt_cfmws *cfmws;
cxl_calc_hb_fn cxl_calc_hb;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
struct resource *res;
int rc;
cfmws = (struct acpi_cedt_cfmws *) header;
rc = cxl_acpi_cfmws_verify(dev, cfmws);
if (rc) {
dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
return 0;
return rc;
}
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
@ -254,7 +250,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
if (IS_ERR(cxlrd))
return 0;
return PTR_ERR(cxlrd);
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
@ -295,16 +291,7 @@ err_xormap:
put_device(&cxld->dev);
else
rc = cxl_decoder_autoremove(dev, cxld);
if (rc) {
dev_err(dev, "Failed to add decode range: %pr", res);
return rc;
}
dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
dev_name(&cxld->dev),
phys_to_target_node(cxld->hpa_range.start),
cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
err_insert:
kfree(res->name);
@ -313,6 +300,29 @@ err_name:
return -ENOMEM;
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
struct cxl_cfmws_context *ctx = arg;
struct device *dev = ctx->dev;
int rc;
rc = __cxl_parse_cfmws(cfmws, ctx);
if (rc)
dev_err(dev,
"Failed to add decode range: [%#llx - %#llx] (%d)\n",
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1, rc);
else
dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
/* never fail cxl_acpi load for a single window failure */
return 0;
}
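
The refactor splits CFMWS handling into an inner helper that returns real error codes and an outer subtable callback that logs them but always returns 0, so one bad window cannot abort enumeration of the rest. The shape of the pattern, sketched with hypothetical names:

#include <stdio.h>

static int parse_one_window(int id)     /* inner: report real errors */
{
        return (id == 2) ? -22 /* -EINVAL */ : 0;
}

static int subtable_callback(int id)    /* outer: log, never fail the scan */
{
        int rc = parse_one_window(id);

        if (rc)
                fprintf(stderr, "window %d failed (%d), continuing\n", id, rc);
        return 0;                       /* keep iterating the other windows */
}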
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
struct device *dev)
{


@ -475,9 +475,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
allowed++;
}
if (!allowed) {
cxl_set_mem_enable(cxlds, 0);
info->mem_enabled = 0;
if (!allowed && info->mem_enabled) {
dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
return -ENXIO;
}
/*


@ -57,6 +57,8 @@
#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
#define BUS_WIDTH_WORD_SIZE GENMASK(3, 0)
#define BUS_WIDTH_FRAME_SIZE GENMASK(7, 4)
#define BUS_WIDTH_8BIT 0x00
#define BUS_WIDTH_16BIT 0x01
#define BUS_WIDTH_32BIT 0x02
@ -740,7 +742,8 @@ static int admac_device_config(struct dma_chan *chan,
struct admac_data *ad = adchan->host;
bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
int wordsize = 0;
u32 bus_width = 0;
u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
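
Seeding bus_width from the current register value with the word-size and frame-size fields masked out turns the update into a read-modify-write, so unrelated bits in REG_BUS_WIDTH survive a reconfiguration instead of being zeroed. The idiom, with the register access stubbed out:

#include <stdint.h>

#define WORD_SIZE_MASK  0x0fu           /* GENMASK(3, 0) */
#define FRAME_SIZE_MASK 0xf0u           /* GENMASK(7, 4) */

static uint32_t reg_read(void) { return 0x00dead00u; }  /* stub */

static uint32_t update_bus_width(uint32_t new_fields)
{
        uint32_t v = reg_read() & ~(WORD_SIZE_MASK | FRAME_SIZE_MASK);

        return v | new_fields;          /* other bits survive the update */
}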


@ -176,7 +176,7 @@ dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
};
struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
char name[32];
regs_dent = debugfs_create_dir(WRITE_STR, dent);
@ -239,7 +239,7 @@ static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
};
struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
char name[32];
regs_dent = debugfs_create_dir(READ_STR, dent);
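
These name buffers grow from 16 to 32 bytes because snprintf() silently truncates anything that no longer fits (newer GCC also warns via -Wformat-truncation). A tiny demonstration:

#include <stdio.h>

int main(void)
{
        char small[16], big[32];
        int n = snprintf(small, sizeof(small), "%s_ch_%d",
                         "write_channels", 11);

        /* n = 20, the full length; small kept only 15 chars plus NUL */
        printf("wanted %d, got \"%s\"\n", n, small);
        snprintf(big, sizeof(big), "%s_ch_%d", "write_channels", 11);
        printf("fits: \"%s\"\n", big);
        return 0;
}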


@ -116,7 +116,7 @@ static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
{
struct dentry *regs_dent, *ch_dent;
char name[16];
char name[32];
int i;
regs_dent = debugfs_create_dir(WRITE_STR, dent);
@ -133,7 +133,7 @@ static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent)
{
struct dentry *regs_dent, *ch_dent;
char name[16];
char name[32];
int i;
regs_dent = debugfs_create_dir(READ_STR, dent);


@ -805,7 +805,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
int i;
int cpu;
int ret;
char irq_name[20];
char irq_name[32];
fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");


@ -25,7 +25,7 @@ struct sh_dmae_chan {
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
void __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
char dev_id[32]; /* unique name per DMAC of channel */
int pm_error;
dma_addr_t slave_addr;
};


@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
if (!irq_name) {
ret = -ENOMEM;
goto err_disable_pm;
}
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
if (!irq_name) {
ret = -ENOMEM;
goto err_disable_pm;
}
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {


@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
*/
card->bm_generation = generation;
if (root_device == NULL) {
if (card->gap_count == 0) {
/*
* If self IDs have inconsistent gap counts, do a
* bus reset ASAP. The config rom read might never
* complete, so don't wait for it. However, still
* send a PHY configuration packet prior to the
* bus reset. The PHY configuration packet might
* fail, but 1394-2008 8.4.5.2 explicitly permits
* it in this case, so it should be safe to try.
*/
new_root_id = local_id;
/*
* We must always send a bus reset if the gap count
* is inconsistent, so bypass the 5-reset limit.
*/
card->bm_retries = 0;
} else if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.


@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
int md_size = md->num_pages << EFI_PAGE_SHIFT;
u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))
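
md->num_pages is a 64-bit page count and EFI_PAGE_SHIFT is 12, so any memory descriptor of 2 GiB or more overflows an int md_size; the fix widens it to u64. A demo of the truncation (region size hypothetical):

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12

int main(void)
{
        uint64_t num_pages = 1u << 20;          /* a 4 GiB region */
        int bad = num_pages << EFI_PAGE_SHIFT;  /* truncated, typically 0 */
        uint64_t good = num_pages << EFI_PAGE_SHIFT;

        printf("int: %d, u64: %llu\n", bad, (unsigned long long)good);
        return 0;
}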


@ -134,15 +134,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
/*
* Special purpose memory is 'soft reserved', which means it
* is set aside initially, but can be hotplugged back in or
* be assigned to the dax driver after boot.
*/
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
return false;
/*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
@ -187,6 +178,16 @@ static __init void reserve_regions(void)
size = npages << PAGE_SHIFT;
if (is_memory(md)) {
/*
* Special purpose memory is 'soft reserved', which
* means it is set aside initially. Don't add a memblock
* for it now so that it can be hotplugged back in or
* be assigned to the dax driver after boot.
*/
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
continue;
early_init_dt_add_memory_arch(paddr, size);
if (!is_usable_memory(md))


@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
cflags-$(CONFIG_RISCV) += -fpic
cflags-$(CONFIG_RISCV) += -fpic -mno-relax
cflags-$(CONFIG_LOONGARCH) += -fpie
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt


@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
int md_size = md->num_pages << EFI_PAGE_SHIFT;
u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))


@ -1009,6 +1009,8 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
/* indicate amdgpu suspension status */
bool suspend_complete;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;


@ -2409,6 +2409,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
@ -2423,6 +2424,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);


@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
}
}
if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
ret = -EFAULT;
err_free_shared_buf:


@ -3033,6 +3033,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v9_0_cp_gfx_enable(adev, true);
/* For now, only apply the quirk on the APU gfx9 series; it is already
* confirmed that the APU gfx10/gfx11 don't need such an update.
*/
if (adev->flags & AMD_IS_APU &&
adev->in_s3 && !adev->suspend_complete) {
DRM_INFO(" Will skip the CSB packet resubmit\n");
return 0;
}
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);


@ -426,6 +426,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
u32 inst_mask;
int i;
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset =
SOC15_REG_OFFSET(
NBIO, 0,
regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
<< 2;
WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
0xff & ~(adev->gfx.xcc_mask));


@ -1296,10 +1296,32 @@ static int soc15_common_suspend(void *handle)
return soc15_common_hw_fini(adev);
}
static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
{
u32 sol_reg;
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
/* Reset on resume for the following suspend-abort cases:
* 1) For now the reset is limited to the APU side; dGPUs haven't been checked yet.
* 2) The S3 suspend was aborted and the TOS has already launched.
*/
if (adev->flags & AMD_IS_APU && adev->in_s3 &&
!adev->suspend_complete &&
sol_reg)
return true;
return false;
}
static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (soc15_need_reset_on_resume(adev)) {
dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
soc15_asic_reset(adev);
}
return soc15_common_hw_init(adev);
}


@ -1482,10 +1482,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = adev_to_drm(kfd->adev);
struct drm_device *ddev;
if (node->xcp)
ddev = node->xcp->ddev;
else
ddev = adev_to_drm(node->adev);
return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
ddev->render->index,


@ -1816,21 +1816,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub aux callback");
goto error;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
goto error;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
goto error;
}
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
* It is expected that DMUB will resend any pending notifications at this point, for
* example HPD from DPIA.
* It is expected that DMUB will resend any pending notifications at this point. Note
* that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to
* align with the legacy interface initialization sequence. Connection status will be
* proactively detected once in amdgpu_dm_initialize_drm_device.
*/
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
dc_enable_dmub_outbox(adev->dm.dc);
/* DPIA trace goes to dmesg logs only if outbox is enabled */
@ -2256,6 +2247,7 @@ static int dm_sw_fini(void *handle)
if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
kfree(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
}
@ -3484,6 +3476,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
}
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
@ -3509,10 +3509,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
handle_hpd_rx_irq,
(void *) aconnector);
}
if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
aconnector;
}
}
@ -4481,6 +4477,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link = dc_get_link_at_index(dm->dc, i);
if (dm->hpd_rx_offload_wq)
dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
aconnector;
if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");


@ -1860,19 +1860,21 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
} else if (revision.minor == 3) {
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
}
@ -2435,10 +2437,11 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
info->gpu_cap_info =
le32_to_cpu(info_v11->gpucapinfo);
/*
@ -2650,11 +2653,12 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
info->gpu_cap_info =
le32_to_cpu(info_v2_1->gpucapinfo);
/*
@ -2812,11 +2816,11 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
info->gpu_cap_info =
le32_to_cpu(info_v2_2->gpucapinfo);
/*


@ -497,7 +497,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}
bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
{
return dc->link_srv->validate_dpia_bandwidth(streams, count);
}


@ -2116,11 +2116,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*
* @dc: pointer to dc struct
* @stream: pointer to all possible streams
* @num_streams: number of valid DPIA streams
* @count: number of valid DPIA streams
*
* return: TRUE if the BW used by DPIAs doesn't exceed the available BW, FALSE otherwise
*/
bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
const unsigned int count);
/* Sink Interfaces - A sink corresponds to a display output device */


@ -1433,6 +1433,12 @@ struct dp_trace {
#ifndef DP_TUNNELING_STATUS
#define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
#endif
#ifndef DP_TUNNELING_MAX_LINK_RATE
#define DP_TUNNELING_MAX_LINK_RATE 0xE0028 /* 1.4a */
#endif
#ifndef DP_TUNNELING_MAX_LANE_COUNT
#define DP_TUNNELING_MAX_LANE_COUNT 0xE0029 /* 1.4a */
#endif
#ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
#define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
#endif


@ -1110,21 +1110,25 @@ struct dc_panel_config {
} ilr;
};
#define MAX_SINKS_PER_LINK 4
/*
* USB4 DPIA BW ALLOCATION STRUCTS
*/
struct dc_dpia_bw_alloc {
int sink_verified_bw; // The verified BW that the sink can allocate and use
int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
int sink_max_bw; // The Max BW that sink can require/support
int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
int link_verified_bw; // The verified BW that the link can allocate and use
int link_max_bw; // The Max BW that link can require/support
int allocated_bw; // The Actual Allocated BW for this DPIA
int estimated_bw; // The estimated available BW for this DPIA
int bw_granularity; // BW Granularity
int dp_overhead; // DP overhead in dp tunneling
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
bool response_ready; // Response ready from the CM side
uint8_t nrd_max_lane_count; // Non-reduced max lane count
uint8_t nrd_max_link_rate; // Non-reduced max link rate
};
#define MAX_SINKS_PER_LINK 4
enum dc_hpd_enable_select {
HPD_EN_FOR_ALL_EDP = 0,
HPD_EN_FOR_PRIMARY_EDP_ONLY,


@ -2071,17 +2071,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
}
}
/*
* If the link is DP-over-USB4 do the following:
* - Train with fallback when enabling DPIA link. Conventional links are
/* Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
* - Allocate only what the stream needs for bw in Gbps. Inform the CM
* in case stream needs more or less bw from what has been allocated
* earlier at plug time.
*/
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
do_fallback = true;
}
/*
* Temporary w/a to get DP2.0 link rates to work with SST.
@ -2263,6 +2257,32 @@ static enum dc_status enable_link(
return status;
}
static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
{
return true;
}
static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
{
bool ret;
int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
dc_link_get_highest_encoding_format(stream->sink->link));
ret = allocate_usb4_bandwidth_for_stream(stream, bw);
return ret;
}
static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
{
bool ret;
ret = allocate_usb4_bandwidth_for_stream(stream, 0);
return ret;
}
void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
@ -2299,6 +2319,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
deallocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
@ -2520,6 +2543,9 @@ void link_set_dpms_on(
}
}
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
allocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&

View File

@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
return DC_OK;
}
/*
* This function calculates the bandwidth required for the stream timing
* and aggregates the stream bandwidth for the respective dpia link
*
* @stream: pointer to the dc_stream_state struct instance
* @num_streams: number of streams to be validated
*
* return: true if validation succeeded
*/
bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
{
bool ret = true;
int bw_needed[MAX_DPIA_NUM];
struct dc_link *link[MAX_DPIA_NUM];
int bw_needed[MAX_DPIA_NUM] = {0};
struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
int num_dpias = 0;
if (!num_streams || num_streams > MAX_DPIA_NUM)
return ret;
for (unsigned int i = 0; i < num_streams; ++i) {
if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
/* new dpia sst stream, check whether it exceeds max dpia */
if (num_dpias >= MAX_DPIA_NUM)
return false;
for (uint8_t i = 0; i < num_streams; ++i) {
link[i] = stream[i].link;
bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
dc_link_get_highest_encoding_format(link[i]));
dpia_link[num_dpias] = stream[i].link;
bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
num_dpias++;
} else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
uint8_t j = 0;
/* check whether it's a known dpia link */
for (; j < num_dpias; ++j) {
if (dpia_link[j] == stream[i].link)
break;
}
ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
return ret;
if (j == num_dpias) {
/* new dpia mst stream, check whether it exceeds max dpia */
if (num_dpias >= MAX_DPIA_NUM)
return false;
else {
dpia_link[j] = stream[i].link;
num_dpias++;
}
}
bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
dc_link_get_highest_encoding_format(dpia_link[j]));
}
}
/* Include dp overheads */
for (uint8_t i = 0; i < num_dpias; ++i) {
int dp_overhead = 0;
dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
bw_needed[i] += dp_overhead;
}
return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
}
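
A minimal standalone sketch of the per-DPIA aggregation the new validator performs. struct stream, link_id and the bandwidth figures are invented stand-ins for the dc structures, and the SST/MST branches are collapsed into a single lookup by link:

#include <stdio.h>
#include <stdbool.h>

#define MAX_DPIA_NUM 4

struct stream { int link_id; int bw_kbps; };

static bool aggregate(const struct stream *s, unsigned int n)
{
	int bw_needed[MAX_DPIA_NUM] = { 0 };
	int dpia_link[MAX_DPIA_NUM] = { 0 };
	int num_dpias = 0;

	for (unsigned int i = 0; i < n; ++i) {
		int j = 0;

		/* reuse an existing entry when streams share a link (MST) */
		for (; j < num_dpias; ++j)
			if (dpia_link[j] == s[i].link_id)
				break;

		if (j == num_dpias) {
			if (num_dpias >= MAX_DPIA_NUM)
				return false; /* exceeds max dpia */
			dpia_link[j] = s[i].link_id;
			num_dpias++;
		}
		bw_needed[j] += s[i].bw_kbps;
	}

	for (int j = 0; j < num_dpias; ++j)
		printf("dpia link %d needs %d kbps\n",
		       dpia_link[j], bw_needed[j]);
	return true;
}

int main(void)
{
	const struct stream s[] = {
		{ 1, 8100000 },	/* SST stream on its own link */
		{ 2, 4050000 },	/* two MST streams sharing link 2 */
		{ 2, 2700000 },
	};

	return aggregate(s, 3) ? 0 : 1;
}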

View File

@ -54,11 +54,18 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
static void reset_bw_alloc_struct(struct dc_link *link)
{
link->dpia_bw_alloc_config.bw_alloc_enabled = false;
link->dpia_bw_alloc_config.sink_verified_bw = 0;
link->dpia_bw_alloc_config.sink_max_bw = 0;
link->dpia_bw_alloc_config.link_verified_bw = 0;
link->dpia_bw_alloc_config.link_max_bw = 0;
link->dpia_bw_alloc_config.allocated_bw = 0;
link->dpia_bw_alloc_config.estimated_bw = 0;
link->dpia_bw_alloc_config.bw_granularity = 0;
link->dpia_bw_alloc_config.dp_overhead = 0;
link->dpia_bw_alloc_config.response_ready = false;
link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
}
#define BW_GRANULARITY_0 4 // 0.25 Gbps
@ -104,6 +111,32 @@ static int get_estimated_bw(struct dc_link *link)
return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
}
static int get_non_reduced_max_link_rate(struct dc_link *link)
{
uint8_t nrd_max_link_rate = 0;
core_link_read_dpcd(
link,
DP_TUNNELING_MAX_LINK_RATE,
&nrd_max_link_rate,
sizeof(uint8_t));
return nrd_max_link_rate;
}
static int get_non_reduced_max_lane_count(struct dc_link *link)
{
uint8_t nrd_max_lane_count = 0;
core_link_read_dpcd(
link,
DP_TUNNELING_MAX_LANE_COUNT,
&nrd_max_lane_count,
sizeof(uint8_t));
return nrd_max_lane_count;
}
/*
* Read all New BW alloc configuration, e.g. estimated_bw, allocated_bw,
* granularity, Driver_ID, CM_Group, & populate the BW allocation structs
@ -111,13 +144,20 @@ static int get_estimated_bw(struct dc_link *link)
*/
static void init_usb4_bw_struct(struct dc_link *link)
{
// Init the known values
reset_bw_alloc_struct(link);
/* init the known values */
link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);
DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
__func__, link->dpia_bw_alloc_config.bw_granularity,
link->dpia_bw_alloc_config.estimated_bw);
DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
__func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
link->dpia_bw_alloc_config.nrd_max_lane_count);
}
static uint8_t get_lowest_dpia_index(struct dc_link *link)
@ -142,39 +182,50 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
}
/*
* Get the Max Available BW or Max Estimated BW for each Host Router
* Get the maximum dp tunnel bandwidth of a host router
*
* @link: pointer to the dc_link struct instance
* @type: ESTIMATED BW or MAX AVAILABLE BW
* @dc: pointer to the dc struct instance
* @hr_index: host router index
*
* return: response_ready flag from dc_link struct
* return: host router maximum dp tunnel bandwidth
*/
static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
{
const struct dc *dc_struct = link->dc;
uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
struct dc_link *link_temp;
uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
uint8_t hr_index_temp = 0;
struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
int i;
for (i = 0; i < MAX_PIPES * 2; ++i) {
for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
link_temp = dc_struct->links[i];
if (!link_temp || !link_temp->hpd_status)
continue;
hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
if (hr_index_temp == hr_index) {
link_dpia_primary = dc->links[i];
link_dpia_secondary = dc->links[i + 1];
if (idx_temp == idx) {
if (type == HOST_ROUTER_BW_ESTIMATED)
total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
else if (type == HOST_ROUTER_BW_ALLOCATED)
total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
/**
* If BW allocation enabled on both DPIAs, then
* HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
* otherwise HR BW = Estimated(bw alloc enabled dpia)
*/
if ((link_dpia_primary->hpd_status &&
link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
(link_dpia_secondary->hpd_status &&
link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
} else if (link_dpia_primary->hpd_status &&
link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
} else if (link_dpia_secondary->hpd_status &&
link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
}
break;
}
}
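
A small sketch of the link-to-host-router mapping the rewritten helper relies on: two DPIAs per host router, paired by link index relative to the lowest DPIA. The link indices below are invented:

#include <stdio.h>

int main(void)
{
	int lowest = 2;			/* lowest DPIA link index */
	int links[] = { 2, 3, 4, 5 };	/* two host routers' worth */

	for (unsigned int i = 0; i < sizeof(links) / sizeof(links[0]); ++i)
		printf("link %d -> host router %d (%s)\n", links[i],
		       (links[i] - lowest) / 2,
		       ((links[i] - lowest) % 2) ? "secondary" : "primary");
	return 0;
}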
@ -194,7 +245,6 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
if (link) {
DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
__func__, link->link_index);
link->dpia_bw_alloc_config.sink_allocated_bw = 0;
reset_bw_alloc_struct(link);
}
}
@ -220,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
/* Error check whether requested and allocated are equal */
req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) {
if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
__func__, link->link_index);
}
@ -343,9 +393,9 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
__func__, link->link_index);
DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
__func__, link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed);
__func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
link->dpia_bw_alloc_config.sink_allocated_bw = bw_needed;
link->dpia_bw_alloc_config.allocated_bw = bw_needed;
link->dpia_bw_alloc_config.response_ready = true;
break;
@ -383,8 +433,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
if (link->hpd_status && peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
link->dpia_bw_alloc_config.link_max_bw = peak_bw;
set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
do {
if (timeout > 0)
@ -396,8 +446,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
if (!timeout)
ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
else if (link->dpia_bw_alloc_config.allocated_bw > 0)
ret = link->dpia_bw_alloc_config.allocated_bw;
}
//2. Cold Unplug
else if (!link->hpd_status)
@ -406,7 +456,6 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
out:
return ret;
}
bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
{
bool ret = false;
@ -414,7 +463,7 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
__func__, link->link_index, link->hpd_status,
link->dpia_bw_alloc_config.sink_allocated_bw, req_bw);
link->dpia_bw_alloc_config.allocated_bw, req_bw);
if (!get_bw_alloc_proceed_flag(link))
goto out;
@ -439,31 +488,70 @@ out:
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
{
bool ret = true;
int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
uint8_t lowest_dpia_index = 0, dpia_index = 0;
uint8_t i;
int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
uint8_t lowest_dpia_index, i, hr_index;
if (!num_dpias || num_dpias > MAX_DPIA_NUM)
return ret;
//Get total Host Router BW & Validate against each Host Router max BW
lowest_dpia_index = get_lowest_dpia_index(link[0]);
/* get total Host Router BW with granularity for the given modes */
for (i = 0; i < num_dpias; ++i) {
int granularity_Gbps = 0;
int bw_granularity = 0;
if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
continue;
lowest_dpia_index = get_lowest_dpia_index(link[i]);
if (link[i]->link_index < lowest_dpia_index)
continue;
dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
bw_needed_per_hr[hr_index] += bw_granularity;
}
/* validate against each Host Router max BW */
for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
if (bw_needed_per_hr[hr_index]) {
host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
ret = false;
break;
}
}
}
return ret;
}
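
A worked sketch of the granularity round-up added above, assuming Kbps_TO_Gbps is 1000 * 1000 and the 0.25 Gbps BW_GRANULARITY_0 case: an 8.1 Gbps stream rounds up to 8.25 Gbps before being summed per host router.

#include <stdio.h>

#define Kbps_TO_Gbps (1000 * 1000)

int main(void)
{
	int bw_granularity = 4;		/* BW_GRANULARITY_0: 0.25 Gbps steps */
	int granularity_Gbps = Kbps_TO_Gbps / bw_granularity; /* 250000 kbps */
	int bw_needed = 8100000;	/* 8.1 Gbps stream */

	/* round up to the next multiple of the allocation granularity */
	int rounded = (bw_needed / granularity_Gbps) * granularity_Gbps +
		      ((bw_needed % granularity_Gbps) ? granularity_Gbps : 0);

	printf("%d kbps rounds up to %d kbps\n", bw_needed, rounded); /* 8250000 */
	return 0;
}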
int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
{
int dp_overhead = 0, link_mst_overhead = 0;
if (!get_bw_alloc_proceed_flag((link)))
return dp_overhead;
/* if it's an mst link, add MTPH overhead */
if ((link->type == dc_connection_mst_branch) &&
!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
/* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
* MST overhead is 1/64 of link bandwidth (excluding any overhead)
*/
const struct dc_link_settings *link_cap =
dc_link_get_link_cap(link);
uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
(uint32_t)link_cap->lane_count *
LINK_RATE_REF_FREQ_IN_KHZ * 8;
link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
}
/* add all the overheads */
dp_overhead = link_mst_overhead;
return dp_overhead;
}
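
A worked sketch of the MTPH overhead computed above: one of the 64 MTP time slots carries the header, so the overhead is ceil(link_bw / 64). The 21.6 Gbps figure (HBR2 x 4 lanes) is an illustrative assumption:

#include <stdio.h>
#include <stdint.h>

static uint32_t mst_overhead_kbps(uint32_t link_bw_in_kbps)
{
	/* divide by 64, rounding any remainder up */
	return link_bw_in_kbps / 64 + (link_bw_in_kbps % 64 ? 1 : 0);
}

int main(void)
{
	uint32_t bw = 21600000;	/* 21.6 Gbps in kbps */

	printf("MST overhead: %u kbps\n", mst_overhead_kbps(bw)); /* 337500 */
	return 0;
}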

View File

@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
*/
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
/*
* Obtain all the DP overheads in dp tunneling for the dpia link
*
* @link: pointer to the dc_link struct instance
*
* return: DP overheads in DP tunneling
*/
int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */

View File

@ -1034,7 +1034,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
lockdep_assert_none_held_once();
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@ -1103,7 +1104,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
* fallthrough and try a 0 timeout wait!
*/
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
@ -1378,10 +1380,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
/* This happens inside the syncobj lock */
fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
if (!fence)
return;
ret = dma_fence_chain_find_seqno(&fence, entry->point);
if (ret != 0 || !fence) {
if (ret != 0) {
/* The given seqno has not been submitted yet. */
dma_fence_put(fence);
return;
} else if (!fence) {
/* If dma_fence_chain_find_seqno returns 0 but sets the fence
* to NULL, it implies that the given seqno is signaled and a
* later seqno has already been submitted. Assign a stub fence
* so that the eventfd still gets signaled below.
*/
fence = dma_fence_get_stub();
}
list_del_init(&entry->node);
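
A standalone sketch of the three-way dma_fence_chain_find_seqno() outcome handling introduced above; struct fence is a stand-in so the decision logic compiles on its own:

#include <stdio.h>
#include <stddef.h>

struct fence { int id; };

static struct fence stub_fence = { -1 };

/* Mirrors the handling added to the eventfd entry path:
 * ret != 0              -> seqno not submitted yet, bail out
 * ret == 0, fence NULL  -> seqno already signaled, substitute a stub
 * ret == 0, fence set   -> wait on the chain fence that was found
 */
static struct fence *resolve(int ret, struct fence *found)
{
	if (ret != 0)
		return NULL;
	if (!found)
		return &stub_fence;	/* keeps the eventfd signaling path alive */
	return found;
}

int main(void)
{
	struct fence f = { 7 };

	printf("unsubmitted: %p\n", (void *)resolve(-22, NULL));
	printf("signaled:    %p (stub)\n", (void *)resolve(0, NULL));
	printf("pending:     %p\n", (void *)resolve(0, &f));
	return 0;
}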

View File

@ -1212,7 +1212,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_tv_format format;
u32 format_map;
format_map = 1 << conn_state->tv.mode;
format_map = 1 << conn_state->tv.legacy_mode;
memset(&format, 0, sizeof(format));
memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
@ -2295,7 +2295,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
* Read the list of supported input resolutions for the selected TV
* format.
*/
format_map = 1 << conn_state->tv.mode;
format_map = 1 << conn_state->tv.legacy_mode;
memcpy(&tv_res, &format_map,
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
@ -2360,7 +2360,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
int i;
for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
*val = i;
return 0;
@ -2416,7 +2416,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
if (state->crtc) {
struct drm_crtc_state *crtc_state =
@ -3071,7 +3071,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
drm_property_add_enum(intel_sdvo_connector->tv_format, i,
tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;

View File

@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
int format = conn_state->tv.mode;
int format = conn_state->tv.legacy_mode;
return &tv_modes[format];
}
@ -1710,7 +1710,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
break;
}
connector->state->tv.mode = i;
connector->state->tv.legacy_mode = i;
}
static int
@ -1865,7 +1865,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
old_state = drm_atomic_get_old_connector_state(state, connector);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (old_state->tv.mode != new_state->tv.mode ||
if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.margins.left != new_state->tv.margins.left ||
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
@ -1902,7 +1902,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
conn_state->tv.margins.right = 46;
conn_state->tv.margins.bottom = 37;
conn_state->tv.mode = 0;
conn_state->tv.legacy_mode = 0;
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
@ -1916,7 +1916,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
drm_object_attach_property(&connector->base,
i915->drm.mode_config.legacy_tv_mode_property,
conn_state->tv.mode);
conn_state->tv.legacy_mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
conn_state->tv.margins.left);

View File

@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}

View File

@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_DSI]) {
meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
drm_bridge_remove(&meson_encoder_dsi->bridge);
drm_bridge_remove(meson_encoder_dsi->next_bridge);
}
}

View File

@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}

View File

@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}
static void
shadow_fw_release(void *fw)
{
release_firmware(fw);
}
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
.fini = (void(*)(void *))release_firmware,
.fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
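
A sketch of why a thin wrapper is preferred over casting release_firmware() to void (*)(void *): calling through a mismatched function-pointer type is undefined behavior in C and is rejected by control-flow-integrity checking. The types below are stand-ins:

#include <stdio.h>

struct firmware { int blob; };

static void release_firmware(const struct firmware *fw)
{
	printf("released %p\n", (const void *)fw);
}

/* wrapper with exactly the signature the .fini hook expects */
static void shadow_fw_release(void *fw)
{
	release_firmware(fw);
}

int main(void)
{
	struct firmware fw;
	void (*fini)(void *) = shadow_fw_release;	/* no cast needed */

	fini(&fw);
	return 0;
}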

View File

@ -384,7 +384,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = tt->pages;
struct page **pages = &tt->pages[start_page];
unsigned int order;
pgoff_t i, nr;
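
A minimal sketch of the ttm_pool_free_range() fix above: the walk must start at start_page rather than at the beginning of the page array. Plain ints stand in for struct page pointers:

#include <stdio.h>

int main(void)
{
	int page_ids[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	int start_page = 3, end_page = 6;

	/* before the fix: int *pages = page_ids;  (always walked from 0) */
	int *pages = &page_ids[start_page];	/* after: honor start_page */

	for (int i = 0; i < end_page - start_page; ++i)
		printf("freeing page %d\n", pages[i]);	/* 3, 4, 5 */
	return 0;
}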

View File

@ -4650,6 +4650,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
{ /* Logitech G Pro X Superlight Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
{ /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
{ /* G935 Gaming Headset */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),

View File

@ -800,6 +800,8 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
"thunderstrike%d:blue:led", ts->id);
if (!led->name)
return -ENOMEM;
led->max_brightness = 1;
led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN;
led->brightness_get = &thunderstrike_led_get_brightness;
@ -831,6 +833,8 @@ static inline int thunderstrike_psy_create(struct shield_device *shield_dev)
shield_dev->battery_dev.desc.name =
devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
"thunderstrike_%d", ts->id);
if (!shield_dev->battery_dev.desc.name)
return -ENOMEM;
shield_dev->battery_dev.psy = power_supply_register(
&hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg);

View File

@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)

View File

@ -3512,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
int num_reg_temp_config;
struct device *hwmon_dev;
struct sensor_template_group tsi_temp_tg;
@ -3594,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@ -3669,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@ -3746,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6775_REG_TEMP_CRIT;
@ -3821,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6776_REG_TEMP_CRIT;
@ -3900,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@ -4034,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@ -4123,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6798_REG_TEMP_OVER;
reg_temp_hyst = NCT6798_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6798_REG_TEMP_CRIT;
@ -4204,6 +4212,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
= reg_temp_crit[src - 1];
if (reg_temp_crit_l && reg_temp_crit_l[i])
data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
if (i < num_reg_temp_config)
data->reg_temp_config[src - 1] = reg_temp_config[i];
data->temp_src[src - 1] = src;
continue;
@ -4217,6 +4226,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
data->reg_temp[0][s] = reg_temp[i];
data->reg_temp[1][s] = reg_temp_over[i];
data->reg_temp[2][s] = reg_temp_hyst[i];
if (i < num_reg_temp_config)
data->reg_temp_config[s] = reg_temp_config[i];
if (reg_temp_crit_h && reg_temp_crit_h[i])
data->reg_temp[3][s] = reg_temp_crit_h[i];

View File

@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
ctl &= ~I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
/* flag the last byte as processed */
i2c_imx_slave_event(i2c_imx,
I2C_SLAVE_READ_PROCESSED, &value);
i2c_imx_slave_finish_op(i2c_imx);
return IRQ_HANDLED;
}

View File

@ -1809,7 +1809,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
/* SRQ resize is not supported */
break;
return -EINVAL;
case IB_SRQ_LIMIT:
/* Change the SRQ threshold */
if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
@ -1824,13 +1824,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */
break;
return 0;
default:
ibdev_err(&rdev->ibdev,
"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
return 0;
}
int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)

View File

@ -748,6 +748,7 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (!rc)
srq->threshold = le16_to_cpu(sb->srq_limit);
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);

View File

@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd)
"Unable to allocate credit return DMA range for NUMA %d\n",
i);
ret = -ENOMEM;
goto done;
goto free_cr_base;
}
}
set_dev_node(&dd->pcidev->dev, dd->node);
@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd)
ret = 0;
done:
return ret;
free_cr_base:
free_credit_return(dd);
goto done;
}
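
A standalone sketch of the unwind pattern the hfi1 fix adds, including the backward goto done after the cleanup label; malloc stands in for the per-NUMA DMA allocations and -12 for -ENOMEM:

#include <stdio.h>
#include <stdlib.h>

static int init_many(void **slots, int n)
{
	int ret = 0, i;

	for (i = 0; i < n; i++) {
		slots[i] = malloc(16);
		if (!slots[i]) {
			ret = -12;	/* standing in for -ENOMEM */
			goto free_partial;
		}
	}
done:
	return ret;

free_partial:
	while (i--)		/* free only what was already set up */
		free(slots[i]);
	goto done;
}

int main(void)
{
	void *slots[4];

	printf("init_many: %d\n", init_many(slots, 4));
	return 0;
}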
void free_credit_return(struct hfi1_devdata *dd)

View File

@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
__sdma_txclean(dd, tx);
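
A sketch of the boundary change above: the two predicates differ only at the edge, so that extension now happens only when the descriptor array is actually full and a padding descriptor can still use the last free slot:

#include <stdio.h>

static int needs_extend_old(int num_desc, int desc_limit)
{
	return num_desc + 1 == desc_limit;	/* fired with one slot still free */
}

static int needs_extend_new(int num_desc, int desc_limit)
{
	return num_desc == desc_limit;		/* fires only when actually full */
}

int main(void)
{
	const int limit = 4;

	for (int n = 0; n <= limit; ++n)
		printf("num_desc=%d old=%d new=%d\n", n,
		       needs_extend_old(n, limit),
		       needs_extend_new(n, limit));
	return 0;
}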

View File

@ -346,6 +346,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602

View File

@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
@ -570,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
irq_update_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
if (rf == dev_id) {
tasklet_kill(&rf->dpc_tasklet);
} else {
struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
tasklet_kill(&iwceq->dpc_tasklet);
}
}
/**

Some files were not shown because too many files have changed in this diff.