commit 8697a258ae

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

net/devlink/leftover.c / net/core/devlink.c:
  565b4824c3 ("devlink: change port event netdev notifier from per-net to global")
  f05bd8ebeb ("devlink: move code to a dedicated directory")
  687125b579 ("devlink: split out core code")
https://lore.kernel.org/all/20230208094657.379f2b1a@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
.mailmap

@@ -130,6 +130,7 @@ Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
 Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
+Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> <ezequiel@collabora.com>
 Felipe W Damasio <felipewd@terra.com.br>

@@ -1245,13 +1245,17 @@ PAGE_SIZE multiple when read back.
 	This is a simple interface to trigger memory reclaim in the
 	target cgroup.
 
-	This file accepts a string which contains the number of bytes to
-	reclaim.
+	This file accepts a single key, the number of bytes to reclaim.
+	No nested keys are currently supported.
 
 	Example::
 
 	  echo "1G" > memory.reclaim
 
+	The interface can be later extended with nested keys to
+	configure the reclaim behavior. For example, specify the
+	type of memory to reclaim from (anon, file, ..).
+
 	Please note that the kernel can over or under reclaim from
 	the target cgroup. If less bytes are reclaimed than the
 	specified amount, -EAGAIN is returned.
@@ -1263,13 +1267,6 @@ PAGE_SIZE multiple when read back.
 	This means that the networking layer will not adapt based on
 	reclaim induced by memory.reclaim.
 
-	This file also allows the user to specify the nodes to reclaim from,
-	via the 'nodes=' key, for example::
-
-	  echo "1G nodes=0,1" > memory.reclaim
-
-	The above instructs the kernel to reclaim memory from nodes 0,1.
-
   memory.peak
 	A read-only single value file which exists on non-root
 	cgroups.

@@ -2,3 +2,8 @@
 *.example.dts
 /processed-schema*.yaml
 /processed-schema*.json
+
+#
+# We don't want to ignore the following even if they are dot-files
+#
+!.yamllint

@@ -108,7 +108,7 @@ properties:
 
   msi-controller:
     description:
-      Only present if the Message Based Interrupt functionnality is
+      Only present if the Message Based Interrupt functionality is
       being exposed by the HW, and the mbi-ranges property present.
 
   mbi-ranges:

@@ -40,6 +40,8 @@ properties:
     description:
       Indicates that the setting of RTC time is allowed by the host CPU.
 
+  wakeup-source: true
+
 required:
   - compatible
   - reg

@@ -16,5 +16,5 @@ Contents
 
 Support
 =======
-If you got any problem, contact Wangxun support team via support@trustnetic.com
+If you got any problem, contact Wangxun support team via nic-support@net-swift.com
 and Cc: netdev.

@@ -8070,9 +8070,13 @@ considering the state as complete. VMM needs to ensure that the dirty
 state is final and avoid missing dirty pages from another ioctl ordered
 after the bitmap collection.
 
-NOTE: One example of using the backup bitmap is saving arm64 vgic/its
-tables through KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} command on
-KVM device "kvm-arm-vgic-its" when dirty ring is enabled.
+NOTE: Multiple examples of using the backup bitmap: (1) save vgic/its
+tables through command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} on
+KVM device "kvm-arm-vgic-its". (2) restore vgic/its tables through
+command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} on KVM device
+"kvm-arm-vgic-its". VGICv3 LPI pending status is restored. (3) save
+vgic3 pending table through KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES}
+command on KVM device "kvm-arm-vgic-v3".
 
 8.30 KVM_CAP_XEN_HVM
 --------------------

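For context, the save/restore commands named in the NOTE are driven from
userspace through KVM's device-attribute API. A minimal sketch of triggering
an ITS table save, assuming an arm64 guest and the usual uapi headers (the fd
comes from KVM_CREATE_DEVICE with KVM_DEV_TYPE_ARM_VGIC_ITS; error handling
elided)::

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* its_fd: device fd from KVM_CREATE_DEVICE (KVM_DEV_TYPE_ARM_VGIC_ITS) */
	static int vgic_its_save_tables(int its_fd)
	{
		struct kvm_device_attr attr = {
			.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
			.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
		};

		return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
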
@@ -15679,7 +15679,7 @@ OPENRISC ARCHITECTURE
 M:	Jonas Bonn <jonas@southpole.se>
 M:	Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 M:	Stafford Horne <shorne@gmail.com>
-L:	openrisc@lists.librecores.org
+L:	linux-openrisc@vger.kernel.org
 S:	Maintained
 W:	http://openrisc.io
 T:	git https://github.com/openrisc/linux.git
@@ -21752,6 +21752,7 @@ F:	include/uapi/linux/uvcvideo.h
 
 USB WEBCAM GADGET
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M:	Daniel Scally <dan.scally@ideasonboard.com>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	drivers/usb/gadget/function/*uvc*

Makefile

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*

@@ -2187,7 +2187,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 		((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
 		ite->collection->collection_id;
 	val = cpu_to_le64(val);
-	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
+	return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
 }
 
 /**
@@ -2339,7 +2339,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
 		(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
 		(dev->num_eventid_bits - 1));
 	val = cpu_to_le64(val);
-	return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
+	return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
 }
 
 /**
@@ -2526,7 +2526,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
 	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
 	       collection->collection_id);
 	val = cpu_to_le64(val);
-	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
+	return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
 }
 
 /*
@@ -2607,7 +2607,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
 	 */
 	val = 0;
 	BUG_ON(cte_esz > sizeof(val));
-	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
+	ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
 	return ret;
 }
 
@@ -2743,7 +2743,6 @@ static int vgic_its_has_attr(struct kvm_device *dev,
 static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 {
 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
-	struct vgic_dist *dist = &kvm->arch.vgic;
 	int ret = 0;
 
 	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
@@ -2763,9 +2762,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 		vgic_its_reset(kvm, its);
 		break;
 	case KVM_DEV_ARM_ITS_SAVE_TABLES:
-		dist->save_its_tables_in_progress = true;
 		ret = abi->save_tables(its);
-		dist->save_its_tables_in_progress = false;
 		break;
 	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
 		ret = abi->restore_tables(its);
@@ -2792,7 +2789,7 @@ bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	return dist->save_its_tables_in_progress;
+	return dist->table_write_in_progress;
 }
 
 static int vgic_its_set_attr(struct kvm_device *dev,

@@ -339,7 +339,7 @@ retry:
 	if (status) {
 		/* clear consumed data */
 		val &= ~(1 << bit_nr);
-		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
+		ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
 		if (ret)
 			return ret;
 	}
@@ -434,7 +434,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 		else
 			val &= ~(1 << bit_nr);
 
-		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
+		ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
 		if (ret)
 			goto out;
 	}

@@ -6,6 +6,7 @@
 #define __KVM_ARM_VGIC_NEW_H__
 
 #include <linux/irqchip/arm-gic-common.h>
+#include <asm/kvm_mmu.h>
 
 #define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
 #define IMPLEMENTER_ARM		0x43b
@@ -131,6 +132,19 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
 	return vgic_irq_get_lr_count(irq) > 1;
 }
 
+static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+					const void *data, unsigned long len)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	int ret;
+
+	dist->table_write_in_progress = true;
+	ret = kvm_write_guest_lock(kvm, gpa, data, len);
+	dist->table_write_in_progress = false;
+
+	return ret;
+}
+
 /*
  * This struct provides an intermediate representation of the fields contained
  * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC

@@ -170,6 +170,9 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
 asmlinkage long
 ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
 {
+	struct timespec64 rtn_tp;
+	s64 tick_ns;
+
 	/*
 	 * ia64's clock_gettime() syscall is implemented as a vdso call
 	 * fsys_clock_gettime(). Currently it handles only
@@ -185,8 +188,8 @@ ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *
 	switch (which_clock) {
 	case CLOCK_REALTIME:
 	case CLOCK_MONOTONIC:
-		s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
-		struct timespec64 rtn_tp = ns_to_timespec64(tick_ns);
+		tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
+		rtn_tp = ns_to_timespec64(tick_ns);
 		return put_timespec64(&rtn_tp, tp);
 	}

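The ia64 hunk hoists the declarations out of the switch: before C23, a
declaration cannot directly follow a case label (a label must be attached to a
statement), which compilers reject. A small hypothetical illustration of the
constraint and the two usual workarounds::

	int classify(int key)
	{
		int tmp;			/* workaround 1: hoist, as the fix does */

		switch (key) {
		case 0:
			/* int bad = key;  -- rejected: label followed by a declaration */
			tmp = key * 2;
			return tmp;
		case 1: {			/* workaround 2: open a block */
			int scoped = key + 1;
			return scoped;
		}
		default:
			return -1;
		}
	}
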
@@ -1303,7 +1303,7 @@ static char iodc_dbuf[4096] __page_aligned_bss;
  */
 int pdc_iodc_print(const unsigned char *str, unsigned count)
 {
-	unsigned int i;
+	unsigned int i, found = 0;
 	unsigned long flags;
 
 	count = min_t(unsigned int, count, sizeof(iodc_dbuf));
@@ -1315,6 +1315,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
 			iodc_dbuf[i+0] = '\r';
 			iodc_dbuf[i+1] = '\n';
 			i += 2;
+			found = 1;
 			goto print;
 		default:
 			iodc_dbuf[i] = str[i];
@@ -1330,7 +1331,7 @@ print:
 		__pa(pdc_result), 0, __pa(iodc_dbuf), i, 0);
 	spin_unlock_irqrestore(&pdc_lock, flags);
 
-	return i;
+	return i - found;
 }
 
 #if !defined(BOOTLOADER)

@@ -126,6 +126,12 @@ long arch_ptrace(struct task_struct *child, long request,
 	unsigned long tmp;
 	long ret = -EIO;
 
+	unsigned long user_regs_struct_size = sizeof(struct user_regs_struct);
+#ifdef CONFIG_64BIT
+	if (is_compat_task())
+		user_regs_struct_size /= 2;
+#endif
+
 	switch (request) {
 
 	/* Read the word at location addr in the USER area.  For ptraced
@@ -166,7 +172,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		     addr >= sizeof(struct pt_regs))
 			break;
 		if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
-			data |= 3; /* ensure userspace privilege */
+			data |= PRIV_USER; /* ensure userspace privilege */
 		}
 		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
 				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
@@ -181,14 +187,14 @@ long arch_ptrace(struct task_struct *child, long request,
 		return copy_regset_to_user(child,
 					   task_user_regset_view(current),
 					   REGSET_GENERAL,
-					   0, sizeof(struct user_regs_struct),
+					   0, user_regs_struct_size,
 					   datap);
 
 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
 		return copy_regset_from_user(child,
 					     task_user_regset_view(current),
 					     REGSET_GENERAL,
-					     0, sizeof(struct user_regs_struct),
+					     0, user_regs_struct_size,
 					     datap);
 
 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
@@ -285,7 +291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			if (addr >= sizeof(struct pt_regs))
 				break;
 			if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
-				data |= 3; /* ensure userspace privilege */
+				data |= PRIV_USER; /* ensure userspace privilege */
 			}
 			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
 				/* Special case, fp regs are 64 bits anyway */
@@ -302,6 +308,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			}
 		}
 		break;
+	case PTRACE_GETREGS:
+	case PTRACE_SETREGS:
+	case PTRACE_GETFPREGS:
+	case PTRACE_SETFPREGS:
+		return arch_ptrace(child, request, addr, data);
 
 	default:
 		ret = compat_ptrace_request(child, request, addr, data);
@@ -484,7 +495,7 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
 	case RI(iaoq[0]):
 	case RI(iaoq[1]):
 			/* set 2 lowest bits to ensure userspace privilege: */
-			regs->iaoq[num - RI(iaoq[0])] = val | 3;
+			regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER;
 			return;
 	case RI(sar):	regs->sar = val;
 			return;

@@ -97,6 +97,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	if (radix_enabled())
 		radix__tlb_flush(tlb);
+
+	return hash__tlb_flush(tlb);
 }
 
 #ifdef CONFIG_SMP

@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 	return flags;
 }
 
+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+	unsigned long flags = irq_soft_mask_return();
+
+	irq_soft_mask_set(flags & ~mask);
+
+	return flags;
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
 	return irq_soft_mask_return();
@@ -192,7 +201,7 @@ static inline void arch_local_irq_enable(void)
 
 static inline unsigned long arch_local_irq_save(void)
 {
-	return irq_soft_mask_set_return(IRQS_DISABLED);
+	return irq_soft_mask_or_return(IRQS_DISABLED);
 }
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
  * is a different soft-masked interrupt pending that requires hard
  * masking.
  */
-static inline bool should_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
 		WARN_ON(mfmsr() & MSR_EE);
 	}
 
@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
 	 *
 	 * TODO: Add test for 64e
 	 */
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
-		return false;
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+		if (!power_pmu_wants_prompt_pmi())
+			return false;
+		/*
+		 * If PMIs are disabled then IRQs should be disabled as well,
+		 * so we shouldn't see this condition, check for it just in
+		 * case because we are about to enable PMIs.
+		 */
+		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+			return false;
+	}
 
 	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
 		return false;
@@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
 
 /*
  * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
  */
 static inline void do_hard_irq_enable(void)
 {
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
-		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
-		WARN_ON(mfmsr() & MSR_EE);
-	}
 	/*
-	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
-	 * There is no other reason to enable this way.
+	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
+	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
 	 */
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
 	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
 	__hard_irq_enable();
 }
@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }
 
-static __always_inline bool should_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	return false;
 }

@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
 
 	ppc_msgsync();
 
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();
 
 	kvmppc_clear_host_ipi(smp_processor_id());

@@ -864,7 +864,7 @@ _GLOBAL(load_up_spe)
  * SPE unavailable trap from kernel - print a message, but let
  * the task use SPE in the kernel until it returns to user mode.
  */
-KernelSPE:
+SYM_FUNC_START_LOCAL(KernelSPE)
 	lwz	r3,_MSR(r1)
 	oris	r3,r3,MSR_SPE@h
 	stw	r3,_MSR(r1)	/* enable use of SPE after return */
@@ -881,6 +881,7 @@ KernelSPE:
 #endif
 	.align	4,0
 
+SYM_FUNC_END(KernelSPE)
 #endif /* CONFIG_SPE */
 
 /*

@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
 	irq = static_call(ppc_get_irq)();
 
 	/* We can hard enable interrupts now to allow perf interrupts */
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();
 
 	/* And finally process it */

@@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 	}
 
 	/* Conditionally hard-enable interrupts. */
-	if (should_hard_irq_enable()) {
+	if (should_hard_irq_enable(regs)) {
 		/*
 		 * Ensure a positive value is written to the decrementer, or
 		 * else some CPUs will continue to take decrementer exceptions.

@@ -989,10 +989,13 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
 	 * linux,drconf-usable-memory properties. Get an approximate on the
 	 * number of usable memory entries and use for FDT size estimation.
 	 */
-	usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
-		       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
-
-	extra_size = (unsigned int)(usm_entries * sizeof(u64));
+	if (drmem_lmb_size()) {
+		usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
+			       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
+		extra_size = (unsigned int)(usm_entries * sizeof(u64));
+	} else {
+		extra_size = 0;
+	}
 
 	/*
 	 * Get the number of CPU nodes in the current DT. This allows to

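The kexec hunk adds a guard around the LMB-size divisions: on configurations
whose device tree carries no drmem information, drmem_lmb_size() can be zero
and the old expressions divided by it. The guard pattern, reduced to a
hypothetical helper::

	/* Sketch of the divide-by-zero guard the fix introduces. */
	static unsigned int estimate_entries(unsigned long span, unsigned long lmb_size)
	{
		if (!lmb_size)		/* no drmem info: avoid dividing by zero */
			return 0;

		return span / lmb_size;
	}
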
@@ -912,16 +912,15 @@ static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
 
 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 {
-	ulong r1, ip, msr, lr;
+	ulong r1, msr, lr;
 
 	asm("mr %0, 1" : "=r"(r1));
 	asm("mflr %0" : "=r"(lr));
 	asm("mfmsr %0" : "=r"(msr));
-	asm("bl 1f; 1: mflr %0" : "=r"(ip));
 
 	memset(regs, 0, sizeof(*regs));
 	regs->gpr[1] = r1;
-	regs->nip = ip;
+	regs->nip = _THIS_IP_;
 	regs->msr = msr;
 	regs->link = lr;
 }

@@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
 	end = (unsigned long)__end_rodata;
 
 	radix__change_memory_range(start, end, _PAGE_WRITE);
+
+	for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
+		end = start + PAGE_SIZE;
+		if (overlaps_interrupt_vector_text(start, end))
+			radix__change_memory_range(start, end, _PAGE_WRITE);
+		else
+			break;
+	}
 }
 
 void radix__mark_initmem_nx(void)
@@ -262,6 +270,22 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e
 static unsigned long next_boundary(unsigned long addr, unsigned long end)
 {
 #ifdef CONFIG_STRICT_KERNEL_RWX
+	unsigned long stext_phys;
+
+	stext_phys = __pa_symbol(_stext);
+
+	// Relocatable kernel running at non-zero real address
+	if (stext_phys != 0) {
+		// The end of interrupts code at zero is a rodata boundary
+		unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
+		if (addr < end_intr)
+			return end_intr;
+
+		// Start of relocated kernel text is a rodata boundary
+		if (addr < stext_phys)
+			return stext_phys;
+	}
+
 	if (addr < __pa_symbol(__srwx_boundary))
 		return __pa_symbol(__srwx_boundary);
 #endif

@@ -22,7 +22,7 @@
  * Used to avoid races in counting the nest-pmu units during hotplug
  * register and unregister
  */
-static DEFINE_SPINLOCK(nest_init_lock);
+static DEFINE_MUTEX(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
 static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
@@ -1629,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 {
 	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
-		spin_lock(&nest_init_lock);
+		mutex_lock(&nest_init_lock);
 		if (nest_pmus == 1) {
 			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
 			kfree(nest_imc_refc);
@@ -1639,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 
 		if (nest_pmus > 0)
 			nest_pmus--;
-		spin_unlock(&nest_init_lock);
+		mutex_unlock(&nest_init_lock);
 	}
 
 	/* Free core_imc memory */
@@ -1796,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
 		 * rest. To handle the cpuhotplug callback unregister, we track
 		 * the number of nest pmus in "nest_pmus".
 		 */
-		spin_lock(&nest_init_lock);
+		mutex_lock(&nest_init_lock);
 		if (nest_pmus == 0) {
 			ret = init_nest_pmu_ref();
 			if (ret) {
-				spin_unlock(&nest_init_lock);
+				mutex_unlock(&nest_init_lock);
 				kfree(per_nest_pmu_arr);
 				per_nest_pmu_arr = NULL;
 				goto err_free_mem;
@@ -1808,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
 			/* Register for cpu hotplug notification. */
 			ret = nest_pmu_cpumask_init();
 			if (ret) {
-				spin_unlock(&nest_init_lock);
+				mutex_unlock(&nest_init_lock);
 				kfree(nest_imc_refc);
 				kfree(per_nest_pmu_arr);
 				per_nest_pmu_arr = NULL;
@@ -1816,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
 			}
 		}
 		nest_pmus++;
-		spin_unlock(&nest_init_lock);
+		mutex_unlock(&nest_init_lock);
 		break;
 	case IMC_DOMAIN_CORE:
 		ret = core_imc_pmu_cpumask_init();

|
@ -80,6 +80,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
|
|||
KBUILD_CFLAGS += -fno-omit-frame-pointer
|
||||
endif
|
||||
|
||||
# Avoid generating .eh_frame sections.
|
||||
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
|
||||
|
||||
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
|
||||
KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
|
||||
|
||||
|
|
|
@@ -70,7 +70,6 @@ static_assert(RISCV_ISA_EXT_ID_MAX <= RISCV_ISA_EXT_MAX);
  */
 enum riscv_isa_ext_key {
 	RISCV_ISA_EXT_KEY_FPU,		/* For 'F' and 'D' */
-	RISCV_ISA_EXT_KEY_ZIHINTPAUSE,
 	RISCV_ISA_EXT_KEY_SVINVAL,
 	RISCV_ISA_EXT_KEY_MAX,
 };
@@ -91,8 +90,6 @@ static __always_inline int riscv_isa_ext2key(int num)
 		return RISCV_ISA_EXT_KEY_FPU;
 	case RISCV_ISA_EXT_d:
 		return RISCV_ISA_EXT_KEY_FPU;
-	case RISCV_ISA_EXT_ZIHINTPAUSE:
-		return RISCV_ISA_EXT_KEY_ZIHINTPAUSE;
 	case RISCV_ISA_EXT_SVINVAL:
 		return RISCV_ISA_EXT_KEY_SVINVAL;
 	default:

@@ -4,30 +4,26 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/jump_label.h>
 #include <asm/barrier.h>
-#include <asm/hwcap.h>
 
 static inline void cpu_relax(void)
 {
-	if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) {
 #ifdef __riscv_muldiv
-		int dummy;
-		/* In lieu of a halt instruction, induce a long-latency stall. */
-		__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+	int dummy;
+	/* In lieu of a halt instruction, induce a long-latency stall. */
+	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
 #endif
-	} else {
-		/*
-		 * Reduce instruction retirement.
-		 * This assumes the PC changes.
-		 */
-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
-		__asm__ __volatile__ ("pause");
+
+#ifdef __riscv_zihintpause
+	/*
+	 * Reduce instruction retirement.
+	 * This assumes the PC changes.
+	 */
+	__asm__ __volatile__ ("pause");
 #else
-		/* Encoding of the pause instruction */
-		__asm__ __volatile__ (".4byte 0x100000F");
+	/* Encoding of the pause instruction */
+	__asm__ __volatile__ (".4byte 0x100000F");
 #endif
-	}
 	barrier();
 }
 

@@ -48,6 +48,21 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
 	post_kprobe_handler(p, kcb, regs);
 }
 
+static bool __kprobes arch_check_kprobe(struct kprobe *p)
+{
+	unsigned long tmp = (unsigned long)p->addr - p->offset;
+	unsigned long addr = (unsigned long)p->addr;
+
+	while (tmp <= addr) {
+		if (tmp == addr)
+			return true;
+
+		tmp += GET_INSN_LENGTH(*(u16 *)tmp);
+	}
+
+	return false;
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long probe_addr = (unsigned long)p->addr;
@@ -55,6 +70,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	if (probe_addr & 0x1)
 		return -EILSEQ;
 
+	if (!arch_check_kprobe(p))
+		return -EILSEQ;
+
 	/* copy instruction */
 	p->opcode = *p->addr;
 

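arch_check_kprobe() rejects probes that land in the middle of an instruction
by walking forward from a known boundary and summing instruction lengths. On
RISC-V the length is decided by the low two bits of the first 16-bit parcel.
A self-contained sketch of the same walk, assuming only the standard 16- and
32-bit encodings::

	#include <stdint.h>
	#include <stdbool.h>

	/* Standard 32-bit instructions have both low bits set; anything
	 * else is a 16-bit compressed (RVC) instruction. */
	static unsigned int insn_length(uint16_t parcel)
	{
		return (parcel & 0x3) == 0x3 ? 4 : 2;
	}

	/* Walk from a known boundary (e.g. function start) and check that
	 * addr falls on an instruction boundary. */
	static bool on_insn_boundary(const uint16_t *start, const uint16_t *addr)
	{
		const uint16_t *p = start;

		while (p < addr)
			p += insn_length(*p) / 2;	/* advance in 16-bit parcels */

		return p == addr;
	}
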
@@ -4,6 +4,7 @@
  * Written by Niibe Yutaka and Paul Mundt
  */
 OUTPUT_ARCH(sh)
+#define RUNTIME_DISCARD_EXIT
 #include <asm/thread_info.h>
 #include <asm/cache.h>
 #include <asm/vmlinux.lds.h>

@@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno)
 		asm("mov %%db6, %0" :"=r" (val));
 		break;
 	case 7:
-		asm("mov %%db7, %0" :"=r" (val));
+		/*
+		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
+		 * with other code.
+		 *
+		 * This is needed because a DR7 access can cause a #VC exception
+		 * when running under SEV-ES. Taking a #VC exception is not a
+		 * safe thing to do just anywhere in the entry code and
+		 * re-ordering might place the access into an unsafe location.
+		 *
+		 * This happened in the NMI handler, where the DR7 read was
+		 * re-ordered to happen before the call to sev_es_ist_enter(),
+		 * causing stack recursion.
+		 */
+		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
 		break;
 	default:
 		BUG();
@@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
 		asm("mov %0, %%db6"	::"r" (value));
 		break;
 	case 7:
-		asm("mov %0, %%db7"	::"r" (value));
+		/*
+		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
+		 * with other code.
+		 *
+		 * While is didn't happen with a DR7 write (see the DR7 read
+		 * comment above which explains where it happened), add the
+		 * __FORCE_ORDER here too to avoid similar problems in the
+		 * future.
+		 */
+		asm volatile("mov %0, %%db7"	::"r" (value), __FORCE_ORDER);
 		break;
 	default:
 		BUG();

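The DR7 fix relies on the kernel's __FORCE_ORDER idiom: handing the asm a
dummy memory operand gives the compiler an ordering dependency, so the access
cannot be re-ordered the way a bare asm statement can. A reduced, x86-only
sketch of the idiom (the 0x1000 dummy address mirrors the kernel's definition
in arch/x86/include/asm/special_insns.h; not meant as a drop-in)::

	#define FORCE_ORDER "m" (*(unsigned int *)0x1000UL)	/* dummy operand */

	static inline unsigned long read_db7_ordered(void)
	{
		unsigned long val;

		/* the fake memory input orders this asm against other accesses */
		asm volatile("mov %%db7, %0" : "=r" (val) : FORCE_ORDER);
		return val;
	}
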
@@ -769,8 +769,8 @@ static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
 			 * request from the old cgroup.
 			 */
 			bfq_put_cooperator(sync_bfqq);
-			bfq_release_process_ref(bfqd, sync_bfqq);
 			bic_set_bfqq(bic, NULL, true);
+			bfq_release_process_ref(bfqd, sync_bfqq);
 		}
 	}
 }

@@ -5425,9 +5425,11 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
 
 	bfqq = bic_to_bfqq(bic, false);
 	if (bfqq) {
-		bfq_release_process_ref(bfqd, bfqq);
+		struct bfq_queue *old_bfqq = bfqq;
+
 		bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
 		bic_set_bfqq(bic, bfqq, false);
+		bfq_release_process_ref(bfqd, old_bfqq);
 	}
 
 	bfqq = bic_to_bfqq(bic, true);

@@ -2001,6 +2001,10 @@ void blk_cgroup_bio_start(struct bio *bio)
 	struct blkg_iostat_set *bis;
 	unsigned long flags;
 
+	/* Root-level stats are sourced from system-wide IO stats */
+	if (!cgroup_parent(blkcg->css.cgroup))
+		return;
+
 	cpu = get_cpu();
 	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
 	flags = u64_stats_update_begin_irqsave(&bis->sync);

@@ -4069,8 +4069,9 @@ EXPORT_SYMBOL(blk_mq_init_queue);
  * blk_mq_destroy_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * This shuts down a request queue allocated by blk_mq_init_queue() and drops
- * the initial reference.  All future requests will failed with -ENODEV.
+ * This shuts down a request queue allocated by blk_mq_init_queue(). All future
+ * requests will be failed with -ENODEV. The caller is responsible for dropping
+ * the reference from blk_mq_init_queue() by calling blk_put_queue().
  *
  * Context: can sleep
  */

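With the clarified semantics, a driver that creates a queue now pairs teardown
in two explicit steps. A sketch of the call order the new kerneldoc
prescribes (tag_set setup and the queue's users are elided)::

	struct request_queue *q = blk_mq_init_queue(&tag_set);

	if (IS_ERR(q))
		return PTR_ERR(q);

	/* ... submit and complete I/O ... */

	blk_mq_destroy_queue(q);	/* new requests now fail with -ENODEV */
	blk_put_queue(q);		/* drop the reference from blk_mq_init_queue() */
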
@@ -23,8 +23,8 @@ $(obj)/blacklist_hash_list: $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) FORCE
 targets += blacklist_hash_list
 
 quiet_cmd_extract_certs  = CERT    $@
-      cmd_extract_certs  = $(obj)/extract-cert $(extract-cert-in) $@
-extract-cert-in = $(or $(filter-out $(obj)/extract-cert, $(real-prereqs)),"")
+      cmd_extract_certs  = $(obj)/extract-cert "$(extract-cert-in)" $@
+extract-cert-in = $(filter-out $(obj)/extract-cert, $(real-prereqs))
 
 $(obj)/system_certificates.o: $(obj)/x509_certificate_list

@@ -3109,7 +3109,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
 	 */
 	if (spd > 1)
 		mask &= (1 << (spd - 1)) - 1;
-	else
+	else if (link->sata_spd)
 		return -EINVAL;
 
 	/* were we already at the bottom? */

@@ -137,7 +137,7 @@ struct ublk_device {
 
 	char	*__queues;
 
-	unsigned short  queue_size;
+	unsigned int	queue_size;
 	struct ublksrv_ctrl_dev_info	dev_info;
 
 	struct blk_mq_tag_set	tag_set;

@@ -167,7 +167,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
 		       0, 0);
 
 	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-		&dma_fence_stub.flags);
+		&fence->flags);
 
 	dma_fence_signal(fence);

@@ -1007,6 +1007,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 	/* first try to find a slot in an existing linked list entry */
 	for (prsv = efi_memreserve_root->next; prsv; ) {
 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+		if (!rsv)
+			return -ENOMEM;
 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
 		if (index < rsv->size) {
 			rsv->entry[index].base = addr;

@@ -33,7 +33,7 @@ int __init efi_memattr_init(void)
 		return -ENOMEM;
 	}
 
-	if (tbl->version > 1) {
+	if (tbl->version > 2) {
 		pr_warn("Unexpected EFI Memory Attributes table version %d\n",
 			tbl->version);
 		goto unmap;

@@ -574,20 +574,27 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
 	len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
 			sec->fw_name_id);
 	sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
-	if (!sec->fw_name)
-		return -ENOMEM;
+	if (!sec->fw_name) {
+		ret = -ENOMEM;
+		goto fw_name_fail;
+	}
 
 	fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
 				       &m10bmc_ops, sec);
 	if (IS_ERR(fwl)) {
 		dev_err(sec->dev, "Firmware Upload driver failed to start\n");
-		kfree(sec->fw_name);
-		xa_erase(&fw_upload_xa, sec->fw_name_id);
-		return PTR_ERR(fwl);
+		ret = PTR_ERR(fwl);
+		goto fw_uploader_fail;
 	}
 
 	sec->fwl = fwl;
 	return 0;
+
+fw_uploader_fail:
+	kfree(sec->fw_name);
+fw_name_fail:
+	xa_erase(&fw_upload_xa, sec->fw_name_id);
+	return ret;
 }
 
 static int m10bmc_sec_remove(struct platform_device *pdev)

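The probe fix converts duplicated early returns to the kernel's usual
goto-unwind style, so every failure path releases exactly what was set up
before it. The shape of the pattern, in a minimal self-contained sketch::

	#include <stdlib.h>

	int setup(void **a_out, void **b_out)
	{
		int ret;
		void *a, *b;

		a = malloc(16);
		if (!a)
			return -1;

		b = malloc(16);
		if (!b) {
			ret = -1;
			goto free_a;	/* unwind only what already succeeded */
		}

		*a_out = a;
		*b_out = b;
		return 0;

	free_a:
		free(a);
		return ret;
	}
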
@@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
 	/* Allocate buffers from the service layer's pool. */
 	for (i = 0; i < NUM_SVC_BUFS; i++) {
 		kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
-		if (!kbuf) {
+		if (IS_ERR(kbuf)) {
 			s10_free_buffers(mgr);
-			ret = -ENOMEM;
+			ret = PTR_ERR(kbuf);
 			goto init_done;
 		}

@@ -790,8 +790,8 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
 	 * zero here */
 	WARN_ON(simd != 0);
 
-	/* type 2 wave data */
-	dst[(*no_fields)++] = 2;
+	/* type 3 wave data */
+	dst[(*no_fields)++] = 3;
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);

@@ -337,7 +337,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
 
 static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
 {
-	return;
+	if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
+		uint32_t data;
+
+		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
+		data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+	}
 }
 
 static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)

@@ -640,7 +640,8 @@ static int soc21_common_early_init(void *handle)
 			AMD_CG_SUPPORT_GFX_CGCG |
 			AMD_CG_SUPPORT_GFX_CGLS |
 			AMD_CG_SUPPORT_REPEATER_FGCG |
-			AMD_CG_SUPPORT_GFX_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_HDP_SD;
 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
 			AMD_PG_SUPPORT_VCN_DPG |
 			AMD_PG_SUPPORT_JPEG;

@@ -4501,6 +4501,17 @@ DEVICE_ATTR_WO(s3_debug);
 static int dm_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, Object_Header);
+	u16 data_offset;
+
+	/* if there is no object header, skip DM */
+	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+		dev_info(adev->dev, "No object header, skipping DM\n");
+		return -ENOENT;
+	}
 
 	switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)

@@ -874,8 +874,9 @@ static const struct dc_plane_cap plane_cap = {
 	},
 
 	// 6:1 downscaling ratio: 1000/6 = 166.666
+	// 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250
 	.max_downscale_factor = {
-			.argb8888 = 167,
+			.argb8888 = 250,
 			.nv12 = 167,
 			.fp16 = 167
 	},
@@ -1763,7 +1764,7 @@ static bool dcn314_resource_construct(
 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
 	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
 	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
-	dc->caps.max_downscale_ratio = 600;
+	dc->caps.max_downscale_ratio = 400;
 	dc->caps.i2c_speed_in_khz = 100;
 	dc->caps.i2c_speed_in_khz_hdcp = 100;
 	dc->caps.max_cursor_size = 256;

@@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
 	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
 	.calc_vupdate_position = dcn10_calc_vupdate_position,
 	.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
-	.does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
+	.does_plane_fit_in_mall = NULL,
 	.set_backlight_level = dcn21_set_backlight_level,
 	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
 	.hardware_release = dcn30_hardware_release,

@@ -3183,7 +3183,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
 		} else {
 			v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
 		}
-		v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
+		v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
 		if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
 				<= (isInterlaceTiming ?
 						dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :

@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 	if (dmub->hw_funcs.reset)
 		dmub->hw_funcs.reset(dmub);
 
+	/* reset the cache of the last wptr as well now that hw is reset */
+	dmub->inbox1_last_wptr = 0;
+
 	cw0.offset.quad_part = inst_fb->gpu_addr;
 	cw0.region.base = DMUB_CW0_BASE;
 	cw0.region.top = cw0.region.base + inst_fb->size - 1;
@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
 	if (dmub->hw_funcs.reset)
 		dmub->hw_funcs.reset(dmub);
 
+	/* mailboxes have been reset in hw, so reset the sw state as well */
+	dmub->inbox1_last_wptr = 0;
+	dmub->inbox1_rb.wrpt = 0;
+	dmub->inbox1_rb.rptr = 0;
+	dmub->outbox0_rb.wrpt = 0;
+	dmub->outbox0_rb.rptr = 0;
+	dmub->outbox1_rb.wrpt = 0;
+	dmub->outbox1_rb.rptr = 0;
+
 	dmub->hw_init = false;
 
 	return DMUB_STATUS_OK;

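Both dmub hunks fix the same class of bug: the driver caches ring-buffer
read/write pointers in software, and a hardware reset silently invalidates
that cache. Reduced to a self-contained sketch::

	#include <stdint.h>

	struct ring {
		uint32_t wrpt;	/* software copy of the HW write pointer */
		uint32_t rptr;
	};

	static void ring_hw_reset(struct ring *rb)
	{
		/* ... reset the hardware mailbox here ... */

		/* mirror the reset in software, or the next submission
		 * computes offsets against a stale pointer */
		rb->wrpt = 0;
		rb->rptr = 0;
	}
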
@@ -2007,14 +2007,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 			gc_ver == IP_VERSION(10, 3, 0) ||
 			gc_ver == IP_VERSION(10, 1, 2) ||
 			gc_ver == IP_VERSION(11, 0, 0) ||
-			gc_ver == IP_VERSION(11, 0, 2)))
+			gc_ver == IP_VERSION(11, 0, 2) ||
+			gc_ver == IP_VERSION(11, 0, 3)))
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
 		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
 			gc_ver == IP_VERSION(10, 3, 0) ||
 			gc_ver == IP_VERSION(10, 1, 2) ||
 			gc_ver == IP_VERSION(11, 0, 0) ||
-			gc_ver == IP_VERSION(11, 0, 2)))
+			gc_ver == IP_VERSION(11, 0, 2) ||
+			gc_ver == IP_VERSION(11, 0, 3)))
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
 		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)

@@ -1499,6 +1499,20 @@ static int smu_disable_dpms(struct smu_context *smu)
 		}
 	}
 
+	/*
+	 * For SMU 13.0.4/11, PMFW will handle the features disablement properly
+	 * for gpu reset case. Driver involvement is unnecessary.
+	 */
+	if (amdgpu_in_reset(adev)) {
+		switch (adev->ip_versions[MP1_HWIP][0]) {
+		case IP_VERSION(13, 0, 4):
+		case IP_VERSION(13, 0, 11):
+			return 0;
+		default:
+			break;
+		}
+	}
+
 	/*
 	 * For gpu reset, runpm and hibernation through BACO,
 	 * BACO feature has to be kept enabled.

@@ -1319,7 +1319,7 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
 	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
 	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
 	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
-	{ .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
 
 	{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
 	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },

@@ -1861,12 +1861,20 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
 	vm = ctx->vm;
 	GEM_BUG_ON(!vm);
 
-	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
-	if (err)
-		return err;
+	/*
+	 * Get a reference for the allocated handle.  Once the handle is
+	 * visible in the vm_xa table, userspace could try to close it
+	 * from under our feet, so we need to hold the extra reference
+	 * first.
+	 */
+	i915_vm_get(vm);
+
+	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+	if (err) {
+		i915_vm_put(vm);
+		return err;
+	}
 
-	i915_vm_get(vm);
-
 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
 	args->value = id;
 	args->size = 0;

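The get_ppgtt() fix is an instance of the publish-after-reference rule: once
an object is visible in a lookup table, another thread may close it, so the
extra reference has to be taken before publication. A self-contained sketch
of the rule with hypothetical names::

	#include <stdatomic.h>
	#include <stddef.h>

	struct obj { atomic_int ref; };

	static int publish(_Atomic(struct obj *) *slot, struct obj *o)
	{
		struct obj *expected = NULL;

		atomic_fetch_add(&o->ref, 1);		/* 1: reference first */
		if (!atomic_compare_exchange_strong(slot, &expected, o)) {
			atomic_fetch_sub(&o->ref, 1);	/* 2: undo on failure */
			return -1;
		}
		return 0;				/* 3: now visible, safely pinned */
	}
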
@@ -305,10 +305,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	spin_unlock(&obj->vma.lock);
 
 	obj->tiling_and_stride = tiling | stride;
-	i915_gem_object_unlock(obj);
-
-	/* Force the fence to be reacquired for GTT access */
-	i915_gem_object_release_mmap_gtt(obj);
 
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -321,6 +317,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 		obj->bit_17 = NULL;
 	}
 
+	i915_gem_object_unlock(obj);
+
+	/* Force the fence to be reacquired for GTT access */
+	i915_gem_object_release_mmap_gtt(obj);
+
 	return 0;
 }

|
@ -528,7 +528,7 @@ retry:
|
|||
return rq;
|
||||
}
|
||||
|
||||
struct i915_request *intel_context_find_active_request(struct intel_context *ce)
|
||||
struct i915_request *intel_context_get_active_request(struct intel_context *ce)
|
||||
{
|
||||
struct intel_context *parent = intel_context_to_parent(ce);
|
||||
struct i915_request *rq, *active = NULL;
|
||||
|
@ -552,6 +552,8 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)
|
|||
|
||||
active = rq;
|
||||
}
|
||||
if (active)
|
||||
active = i915_request_get_rcu(active);
|
||||
spin_unlock_irqrestore(&parent->guc_state.lock, flags);
|
||||
|
||||
return active;
|
||||
|
|
|
@@ -268,8 +268,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 
 struct i915_request *intel_context_create_request(struct intel_context *ce);
 
-struct i915_request *
-intel_context_find_active_request(struct intel_context *ce);
+struct i915_request *intel_context_get_active_request(struct intel_context *ce);
 
 static inline bool intel_context_is_barrier(const struct intel_context *ce)
 {

@@ -248,8 +248,8 @@ void intel_engine_dump_active_requests(struct list_head *requests,
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
 				   ktime_t *now);
 
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+				  struct intel_context **ce, struct i915_request **rq);
 
 u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
 struct intel_context *

@@ -2094,17 +2094,6 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
 	}
 }
 
-static unsigned long list_count(struct list_head *list)
-{
-	struct list_head *pos;
-	unsigned long count = 0;
-
-	list_for_each(pos, list)
-		count++;
-
-	return count;
-}
-
 static unsigned long read_ul(void *p, size_t x)
 {
 	return *(unsigned long *)(p + x);
@@ -2196,11 +2185,11 @@ void intel_engine_dump_active_requests(struct list_head *requests,
 	}
 }
 
-static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
+static void engine_dump_active_requests(struct intel_engine_cs *engine,
+					struct drm_printer *m)
 {
+	struct intel_context *hung_ce = NULL;
 	struct i915_request *hung_rq = NULL;
-	struct intel_context *ce;
-	bool guc;
 
 	/*
 	 * No need for an engine->irq_seqno_barrier() before the seqno reads.
@@ -2209,27 +2198,22 @@ static void engine_dump_active_requests(struct intel_engine_cs *engine, struct d
 	 * But the intention here is just to report an instantaneous snapshot
 	 * so that's fine.
 	 */
-	lockdep_assert_held(&engine->sched_engine->lock);
+	intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
 
 	drm_printf(m, "\tRequests:\n");
 
-	guc = intel_uc_uses_guc_submission(&engine->gt->uc);
-	if (guc) {
-		ce = intel_engine_get_hung_context(engine);
-		if (ce)
-			hung_rq = intel_context_find_active_request(ce);
-	} else {
-		hung_rq = intel_engine_execlist_find_hung_request(engine);
-	}
-
 	if (hung_rq)
 		engine_dump_request(hung_rq, m, "\t\thung");
+	else if (hung_ce)
+		drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
 
-	if (guc)
+	if (intel_uc_uses_guc_submission(&engine->gt->uc))
 		intel_guc_dump_active_requests(engine, hung_rq, m);
 	else
-		intel_engine_dump_active_requests(&engine->sched_engine->requests,
-						  hung_rq, m);
+		intel_execlists_dump_active_requests(engine, hung_rq, m);
+
+	if (hung_rq)
+		i915_request_put(hung_rq);
 }
 
 void intel_engine_dump(struct intel_engine_cs *engine,
@@ -2239,7 +2223,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct i915_request *rq;
 	intel_wakeref_t wakeref;
-	unsigned long flags;
 	ktime_t dummy;
 
 	if (header) {
@@ -2276,13 +2259,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		   i915_reset_count(error));
 	print_properties(engine, m);
 
-	spin_lock_irqsave(&engine->sched_engine->lock, flags);
 	engine_dump_active_requests(engine, m);
 
-	drm_printf(m, "\tOn hold?: %lu\n",
-		   list_count(&engine->sched_engine->hold));
-	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
-
 	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
 	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
 	if (wakeref) {
@@ -2328,8 +2306,7 @@ intel_engine_create_virtual(struct intel_engine_cs **siblings,
 	return siblings[0]->cops->create_virtual(siblings, count, flags);
 }
 
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
 {
 	struct i915_request *request, *active = NULL;
 
@@ -2381,6 +2358,33 @@ intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
 	return active;
 }
 
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+				  struct intel_context **ce, struct i915_request **rq)
+{
+	unsigned long flags;
+
+	*ce = intel_engine_get_hung_context(engine);
+	if (*ce) {
+		intel_engine_clear_hung_context(engine);
+
+		*rq = intel_context_get_active_request(*ce);
+		return;
+	}
+
+	/*
+	 * Getting here with GuC enabled means it is a forced error capture
+	 * with no actual hang. So, no need to attempt the execlist search.
+	 */
+	if (intel_uc_uses_guc_submission(&engine->gt->uc))
+		return;
+
+	spin_lock_irqsave(&engine->sched_engine->lock, flags);
+	*rq = engine_execlist_find_hung_request(engine);
+	if (*rq)
+		*rq = i915_request_get_rcu(*rq);
+	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
 void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
 {
 	/*

@@ -4148,6 +4148,33 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 	spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
+static unsigned long list_count(struct list_head *list)
+{
+	struct list_head *pos;
+	unsigned long count = 0;
+
+	list_for_each(pos, list)
+		count++;
+
+	return count;
+}
+
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+					  struct i915_request *hung_rq,
+					  struct drm_printer *m)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+	intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
+
+	drm_printf(m, "\tOn hold?: %lu\n",
+		   list_count(&engine->sched_engine->hold));
+
+	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_execlists.c"
 #endif

@@ -32,6 +32,10 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 							int indent),
 				   unsigned int max);
 
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+					  struct i915_request *hung_rq,
+					  struct drm_printer *m);
+
 bool
 intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);

@@ -1702,7 +1702,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
 			goto next_context;
 
 		guilty = false;
-		rq = intel_context_find_active_request(ce);
+		rq = intel_context_get_active_request(ce);
 		if (!rq) {
 			head = ce->ring->tail;
 			goto out_replay;
@@ -1715,6 +1715,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
 			head = intel_ring_wrap(ce->ring, rq->head);
 
 		__i915_request_reset(rq, guilty);
+		i915_request_put(rq);
 out_replay:
 		guc_reset_state(ce, head, guilty);
 next_context:
@@ -4817,6 +4818,8 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
 
 	xa_lock_irqsave(&guc->context_lookup, flags);
 	xa_for_each(&guc->context_lookup, index, ce) {
+		bool found;
+
 		if (!kref_get_unless_zero(&ce->ref))
 			continue;
 
@@ -4833,10 +4836,18 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
 				goto next;
 		}
 
+		found = false;
+		spin_lock(&ce->guc_state.lock);
 		list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
 			if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
 				continue;
 
+			found = true;
+			break;
+		}
+		spin_unlock(&ce->guc_state.lock);
+
+		if (found) {
 			intel_engine_set_hung_context(engine, ce);
 
 			/* Can only cope with one hang at a time... */
@@ -4844,6 +4855,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
 			xa_lock(&guc->context_lookup);
 			goto done;
 		}
+
 next:
 		intel_context_put(ce);
 		xa_lock(&guc->context_lookup);

@@ -1596,43 +1596,20 @@ capture_engine(struct intel_engine_cs *engine,
 {
 	struct intel_engine_capture_vma *capture = NULL;
 	struct intel_engine_coredump *ee;
-	struct intel_context *ce;
+	struct intel_context *ce = NULL;
 	struct i915_request *rq = NULL;
-	unsigned long flags;
 
 	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
 	if (!ee)
 		return NULL;
 
-	ce = intel_engine_get_hung_context(engine);
-	if (ce) {
-		intel_engine_clear_hung_context(engine);
-		rq = intel_context_find_active_request(ce);
-		if (!rq || !i915_request_started(rq))
-			goto no_request_capture;
-	} else {
-		/*
-		 * Getting here with GuC enabled means it is a forced error capture
-		 * with no actual hang. So, no need to attempt the execlist search.
-		 */
-		if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
-			spin_lock_irqsave(&engine->sched_engine->lock, flags);
-			rq = intel_engine_execlist_find_hung_request(engine);
-			spin_unlock_irqrestore(&engine->sched_engine->lock,
-					       flags);
-		}
-	}
-	if (rq)
-		rq = i915_request_get_rcu(rq);
-
-	if (!rq)
+	intel_engine_get_hung_entity(engine, &ce, &rq);
+	if (!rq || !i915_request_started(rq))
 		goto no_request_capture;
 
 	capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
-	if (!capture) {
-		i915_request_put(rq);
+	if (!capture)
 		goto no_request_capture;
-	}
 	if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
 		intel_guc_capture_get_matching_node(engine->gt, ee, ce);
 
@@ -1642,6 +1619,8 @@ capture_engine(struct intel_engine_cs *engine,
 	return ee;
 
 no_request_capture:
+	if (rq)
+		i915_request_put(rq);
 	kfree(ee);
 	return NULL;
 }

@@ -97,6 +97,7 @@ int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
 int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);

@@ -151,6 +151,9 @@ nvkm_firmware_mem_page(struct nvkm_memory *memory)
 static enum nvkm_memory_target
 nvkm_firmware_mem_target(struct nvkm_memory *memory)
 {
+	if (nvkm_firmware_mem(memory)->device->func->tegra)
+		return NVKM_MEM_TARGET_NCOH;
+
 	return NVKM_MEM_TARGET_HOST;
 }

@@ -2405,7 +2405,7 @@ nv162_chipset = {
 	.bus      = { 0x00000001, gf100_bus_new },
 	.devinit  = { 0x00000001, tu102_devinit_new },
 	.fault    = { 0x00000001, tu102_fault_new },
-	.fb       = { 0x00000001, gv100_fb_new },
+	.fb       = { 0x00000001, tu102_fb_new },
 	.fuse     = { 0x00000001, gm107_fuse_new },
 	.gpio     = { 0x00000001, gk104_gpio_new },
 	.gsp      = { 0x00000001, gv100_gsp_new },
@@ -2440,7 +2440,7 @@ nv164_chipset = {
 	.bus      = { 0x00000001, gf100_bus_new },
 	.devinit  = { 0x00000001, tu102_devinit_new },
 	.fault    = { 0x00000001, tu102_fault_new },
-	.fb       = { 0x00000001, gv100_fb_new },
+	.fb       = { 0x00000001, tu102_fb_new },
 	.fuse     = { 0x00000001, gm107_fuse_new },
 	.gpio     = { 0x00000001, gk104_gpio_new },
 	.gsp      = { 0x00000001, gv100_gsp_new },
@@ -2475,7 +2475,7 @@ nv166_chipset = {
 	.bus      = { 0x00000001, gf100_bus_new },
 	.devinit  = { 0x00000001, tu102_devinit_new },
 	.fault    = { 0x00000001, tu102_fault_new },
-	.fb       = { 0x00000001, gv100_fb_new },
+	.fb       = { 0x00000001, tu102_fb_new },
 	.fuse     = { 0x00000001, gm107_fuse_new },
 	.gpio     = { 0x00000001, gk104_gpio_new },
 	.gsp      = { 0x00000001, gv100_gsp_new },
@@ -2510,7 +2510,7 @@ nv167_chipset = {
 	.bus      = { 0x00000001, gf100_bus_new },
 	.devinit  = { 0x00000001, tu102_devinit_new },
 	.fault    = { 0x00000001, tu102_fault_new },
-	.fb       = { 0x00000001, gv100_fb_new },
+	.fb       = { 0x00000001, tu102_fb_new },
 	.fuse     = { 0x00000001, gm107_fuse_new },
 	.gpio     = { 0x00000001, gk104_gpio_new },
 	.gsp      = { 0x00000001, gv100_gsp_new },
@@ -2545,7 +2545,7 @@ nv168_chipset = {
 	.bus      = { 0x00000001, gf100_bus_new },
 	.devinit  = { 0x00000001, tu102_devinit_new },
 	.fault    = { 0x00000001, tu102_fault_new },
-	.fb       = { 0x00000001, gv100_fb_new },
+	.fb       = { 0x00000001, tu102_fb_new },
 	.fuse     = { 0x00000001, gm107_fuse_new },
 	.gpio     = { 0x00000001, gk104_gpio_new },
 	.gsp      = { 0x00000001, gv100_gsp_new },

--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
@@ -48,6 +48,16 @@ gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
         img += 4;
         len -= 4;
     }
+
+    /* Sigh. Tegra PMU FW's init message... */
+    if (len) {
+        u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+
+        while (len--) {
+            *(u8 *)img++ = data & 0xff;
+            data >>= 8;
+        }
+    }
 }
 
 static void
@@ -64,6 +74,8 @@ gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
         img += 4;
         len -= 4;
     }
+
+    WARN_ON(len);
 }
 
 static void
@@ -74,7 +86,7 @@ gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 d
 
 const struct nvkm_falcon_func_pio
 gm200_flcn_dmem_pio = {
-    .min = 4,
+    .min = 1,
     .max = 0x100,
     .wr_init = gm200_flcn_pio_dmem_wr_init,
     .wr = gm200_flcn_pio_dmem_wr,
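Note: the tail-byte handling added to gm200_flcn_pio_dmem_rd() above unpacks the
last (len % 4) bytes of a transfer from a single 32-bit falcon register read,
least-significant byte first. Below is a minimal standalone C sketch of that
unpacking, with the register read replaced by a plain value; it is illustrative
only, not the driver code itself.

#include <stdint.h>
#include <stdio.h>

/* Unpack 'len' (< 4) trailing bytes from one 32-bit word, LSB first,
 * mirroring the while (len--) loop in the hunk above. */
static void unpack_tail(uint8_t *img, uint32_t data, int len)
{
    while (len--) {
        *img++ = data & 0xff;   /* lowest byte first */
        data >>= 8;             /* shift the next byte down */
    }
}

int main(void)
{
    uint8_t out[3];

    unpack_tail(out, 0x00332211, 3);
    printf("%02x %02x %02x\n", out[0], out[1], out[2]);   /* 11 22 33 */
    return 0;
}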
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
     return ret;
 }
 
 static int
+tu102_devinit_wait(struct nvkm_device *device)
+{
+    unsigned timeout = 50 + 2000;
+
+    do {
+        if (nvkm_rd32(device, 0x118128) & 0x00000001) {
+            if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
+                return 0;
+        }
+
+        usleep_range(1000, 2000);
+    } while (timeout--);
+
+    return -ETIMEDOUT;
+}
+
+int
 tu102_devinit_post(struct nvkm_devinit *base, bool post)
 {
     struct nv50_devinit *init = nv50_devinit(base);
+    int ret;
+
+    ret = tu102_devinit_wait(init->base.subdev.device);
+    if (ret)
+        return ret;
+
     gm200_devinit_preos(init, post);
     return 0;
 }
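Note: tu102_devinit_wait() above is a bounded-poll loop: re-check a readiness
condition, sleep between attempts, and give up with -ETIMEDOUT after a fixed
budget. A hedged userspace sketch of the same pattern follows; the predicate is
a stand-in for the 0x118128/0x118234 register reads, not the real hardware
check.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int polls;

/* Stand-in readiness check: pretend the hardware is ready on the 5th poll. */
static bool devinit_complete(void)
{
    return ++polls >= 5;
}

static int wait_for_devinit(void)
{
    unsigned timeout = 50 + 2000;   /* same budget as the hunk, in ~1ms steps */

    do {
        if (devinit_complete())
            return 0;
        usleep(1000);               /* kernel code uses usleep_range(1000, 2000) */
    } while (timeout--);

    return -ETIMEDOUT;
}

int main(void)
{
    printf("wait_for_devinit() = %d after %d polls\n", wait_for_devinit(), polls);
    return 0;
}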
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -32,6 +32,7 @@ nvkm-y += nvkm/subdev/fb/gp100.o
 nvkm-y += nvkm/subdev/fb/gp102.o
 nvkm-y += nvkm/subdev/fb/gp10b.o
 nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/tu102.o
 nvkm-y += nvkm/subdev/fb/ga100.o
 nvkm-y += nvkm/subdev/fb/ga102.o
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -40,12 +40,6 @@ ga102_fb_vpr_scrub(struct nvkm_fb *fb)
     return ret;
 }
 
-static bool
-ga102_fb_vpr_scrub_required(struct nvkm_fb *fb)
-{
-    return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
-}
-
 static const struct nvkm_fb_func
 ga102_fb = {
     .dtor = gf100_fb_dtor,
@@ -56,7 +50,7 @@ ga102_fb = {
     .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
     .ram_new = ga102_ram_new,
     .default_bigpage = 16,
-    .vpr.scrub_required = ga102_fb_vpr_scrub_required,
+    .vpr.scrub_required = tu102_fb_vpr_scrub_required,
     .vpr.scrub = ga102_fb_vpr_scrub,
 };
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -49,8 +49,3 @@ gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, s
 }
 
 MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -89,4 +89,6 @@ bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
 int gp102_fb_vpr_scrub(struct nvkm_fb *);
 
 int gv100_fb_init_page(struct nvkm_fb *);
+
+bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
 #endif
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+bool
+tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+    return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
+}
+
+static const struct nvkm_fb_func
+tu102_fb = {
+    .dtor = gf100_fb_dtor,
+    .oneinit = gf100_fb_oneinit,
+    .init = gm200_fb_init,
+    .init_page = gv100_fb_init_page,
+    .init_unkn = gp100_fb_init_unkn,
+    .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+    .vpr.scrub_required = tu102_fb_vpr_scrub_required,
+    .vpr.scrub = gp102_fb_vpr_scrub,
+    .ram_new = gp100_ram_new,
+    .default_bigpage = 16,
+};
+
+int
+tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+    return gp102_fb_new_(&tu102_fb, device, type, inst, pfb);
+}
+
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -225,7 +225,7 @@ gm20b_pmu_init(struct nvkm_pmu *pmu)
 
     pmu->initmsg_received = false;
 
-    nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+    nvkm_falcon_pio_wr(falcon, (u8 *)&args, 0, 0, DMEM, addr_args, sizeof(args), 0, false);
     nvkm_falcon_start(falcon);
     return 0;
 }
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1193,14 +1193,11 @@ static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
     return 0;
 }
 
-static int boe_panel_unprepare(struct drm_panel *panel)
+static int boe_panel_disable(struct drm_panel *panel)
 {
     struct boe_panel *boe = to_boe_panel(panel);
     int ret;
 
-    if (!boe->prepared)
-        return 0;
-
     ret = boe_panel_enter_sleep_mode(boe);
     if (ret < 0) {
         dev_err(panel->dev, "failed to set panel off: %d\n", ret);
@@ -1209,6 +1206,16 @@ static int boe_panel_unprepare(struct drm_panel *panel)
 
     msleep(150);
 
+    return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+    struct boe_panel *boe = to_boe_panel(panel);
+
+    if (!boe->prepared)
+        return 0;
+
     if (boe->desc->discharge_on_disable) {
         regulator_disable(boe->avee);
         regulator_disable(boe->avdd);
@@ -1528,6 +1535,7 @@ static enum drm_panel_orientation boe_panel_get_orientation(struct drm_panel *pa
 }
 
 static const struct drm_panel_funcs boe_panel_funcs = {
+    .disable = boe_panel_disable,
     .unprepare = boe_panel_unprepare,
     .prepare = boe_panel_prepare,
     .enable = boe_panel_enable,
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -656,18 +656,8 @@ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
     .atomic_check = drm_crtc_helper_atomic_check,
 };
 
-static void ssd130x_crtc_reset(struct drm_crtc *crtc)
-{
-    struct drm_device *drm = crtc->dev;
-    struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
-
-    ssd130x_init(ssd130x);
-
-    drm_atomic_helper_crtc_reset(crtc);
-}
-
 static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
-    .reset = ssd130x_crtc_reset,
+    .reset = drm_atomic_helper_crtc_reset,
     .destroy = drm_crtc_cleanup,
     .set_config = drm_atomic_helper_set_config,
     .page_flip = drm_atomic_helper_page_flip,
@@ -686,6 +676,12 @@ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
     if (ret)
         return;
 
+    ret = ssd130x_init(ssd130x);
+    if (ret) {
+        ssd130x_power_off(ssd130x);
+        return;
+    }
+
     ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
 
     backlight_enable(ssd130x->bl_dev);
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -3018,7 +3018,8 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
     }
 
     vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
-                                              vc4_hdmi, "vc4",
+                                              vc4_hdmi,
+                                              vc4_hdmi->variant->card_name,
                                               CEC_CAP_DEFAULTS |
                                               CEC_CAP_CONNECTOR_INFO, 1);
     ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -227,6 +227,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
     cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
     if (cl_data->num_hid_devices == 0)
         return -ENODEV;
+    cl_data->is_any_sensor_enabled = false;
 
     INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
     INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
@@ -287,6 +288,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
         status = amd_sfh_wait_for_response
                 (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
         if (status == SENSOR_ENABLED) {
+            cl_data->is_any_sensor_enabled = true;
             cl_data->sensor_sts[i] = SENSOR_ENABLED;
             rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
             if (rc) {
@@ -301,19 +303,26 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                     cl_data->sensor_sts[i]);
                 goto cleanup;
             }
         } else {
             cl_data->sensor_sts[i] = SENSOR_DISABLED;
-            dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
-                cl_data->sensor_idx[i],
-                get_sensor_name(cl_data->sensor_idx[i]),
-                cl_data->sensor_sts[i]);
         }
+        dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+            cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+            cl_data->sensor_sts[i]);
     }
-    if (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0) {
+    if (!cl_data->is_any_sensor_enabled ||
+        (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
         amd_sfh_hid_client_deinit(privdata);
         for (i = 0; i < cl_data->num_hid_devices; i++) {
             devm_kfree(dev, cl_data->feature_report[i]);
             devm_kfree(dev, in_data->input_report[i]);
             devm_kfree(dev, cl_data->report_descr[i]);
         }
-        dev_warn(dev, "Failed to discover, sensors not enabled\n");
+        dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
         return -EOPNOTSUPP;
     }
     schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -32,6 +32,7 @@ struct amd_input_data {
 struct amdtp_cl_data {
     u8 init_done;
     u32 cur_hid_dev;
+    bool is_any_sensor_enabled;
     u32 hid_dev_count;
     u32 num_hid_devices;
     struct device_info *hid_devices;
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1202,6 +1202,7 @@ int hid_open_report(struct hid_device *device)
     __u8 *end;
     __u8 *next;
     int ret;
+    int i;
     static int (*dispatch_type[])(struct hid_parser *parser,
                                   struct hid_item *item) = {
         hid_parser_main,
@@ -1252,6 +1253,8 @@ int hid_open_report(struct hid_device *device)
         goto err;
     }
     device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
+    for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
+        device->collection[i].parent_idx = -1;
 
     ret = -EINVAL;
     while ((next = fetch_item(start, end, &item)) != NULL) {
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -12,6 +12,7 @@
  * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
  * Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
  * Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red>
+ * Copyright (c) 2022 Takahiro Fujii <fujii@xaxxi.net>
  */
 
 /*
@@ -89,7 +90,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
     case USB_DEVICE_ID_ELECOM_M_DT1URBK:
     case USB_DEVICE_ID_ELECOM_M_DT1DRBK:
     case USB_DEVICE_ID_ELECOM_M_HT1URBK:
-    case USB_DEVICE_ID_ELECOM_M_HT1DRBK:
+    case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D:
         /*
          * Report descriptor format:
          * 12: button bit count
@@ -99,6 +100,16 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
          */
        mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
        break;
+    case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
+        /*
+         * Report descriptor format:
+         * 22: button bit count
+         * 30: padding bit count
+         * 24: button report size
+         * 16: button usage maximum
+         */
+        mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8);
+        break;
     }
     return rdesc;
 }
@@ -112,7 +123,8 @@ static const struct hid_device_id elecom_devices[] = {
     { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
     { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
     { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
-    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
     { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -413,6 +413,8 @@
 #define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
 #define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9
 #define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
+#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
 #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
@@ -428,7 +430,8 @@
 #define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
 #define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
 #define USB_DEVICE_ID_ELECOM_M_HT1URBK 0x010c
-#define USB_DEVICE_ID_ELECOM_M_HT1DRBK 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C 0x011c
 
 #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -370,6 +370,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
         USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
       HID_BATTERY_QUIRK_IGNORE },
+    { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
+      HID_BATTERY_QUIRK_IGNORE },
     { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
       HID_BATTERY_QUIRK_IGNORE },
     { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@@ -384,6 +386,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
       HID_BATTERY_QUIRK_IGNORE },
     { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
       HID_BATTERY_QUIRK_IGNORE },
+    { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
+      HID_BATTERY_QUIRK_IGNORE },
     { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
       HID_BATTERY_QUIRK_IGNORE },
     { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3978,7 +3978,8 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
     }
 
     hidpp_initialize_battery(hidpp);
-    hidpp_initialize_hires_scroll(hidpp);
+    if (!hid_is_usb(hidpp->hid_dev))
+        hidpp_initialize_hires_scroll(hidpp);
 
     /* forward current battery state */
     if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -393,7 +393,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
     { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
     { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
     { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
-    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+    { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ELO)
     { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1963,7 +1963,7 @@ static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
 
 static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
 {
-    debugfs_remove(debugfs_lookup("hv-balloon", NULL));
+    debugfs_lookup_and_remove("hv-balloon", NULL);
 }
 
 #else
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -280,6 +280,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
             hid_sensor_convert_timestamp(
                     &accel_state->common_attributes,
                     *(int64_t *)raw_data);
+        ret = 0;
         break;
     default:
         break;
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -298,8 +298,10 @@ static int berlin2_adc_probe(struct platform_device *pdev)
     int ret;
 
     indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
-    if (!indio_dev)
+    if (!indio_dev) {
+        of_node_put(parent_np);
         return -ENOMEM;
+    }
 
     priv = iio_priv(indio_dev);
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -86,6 +86,8 @@
 
 #define IMX8QXP_ADC_TIMEOUT msecs_to_jiffies(100)
 
+#define IMX8QXP_ADC_MAX_FIFO_SIZE 16
+
 struct imx8qxp_adc {
     struct device *dev;
     void __iomem *regs;
@@ -95,6 +97,7 @@ struct imx8qxp_adc {
     /* Serialise ADC channel reads */
     struct mutex lock;
     struct completion completion;
+    u32 fifo[IMX8QXP_ADC_MAX_FIFO_SIZE];
 };
 
 #define IMX8QXP_ADC_CHAN(_idx) { \
@@ -238,8 +241,7 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
             return ret;
         }
 
-        *val = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
-                 readl(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+        *val = adc->fifo[0];
 
         mutex_unlock(&adc->lock);
         return IIO_VAL_INT;
@@ -265,10 +267,15 @@ static irqreturn_t imx8qxp_adc_isr(int irq, void *dev_id)
 {
     struct imx8qxp_adc *adc = dev_id;
     u32 fifo_count;
+    int i;
 
     fifo_count = FIELD_GET(IMX8QXP_ADC_FCTRL_FCOUNT_MASK,
                readl(adc->regs + IMX8QXP_ADR_ADC_FCTRL));
 
+    for (i = 0; i < fifo_count; i++)
+        adc->fifo[i] = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
+                readl_relaxed(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+
     if (fifo_count)
         complete(&adc->completion);
 
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1520,6 +1520,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = {
     },
     {}
 };
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match);
 
 static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
 {
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -57,6 +57,18 @@
 #define TWL6030_GPADCS BIT(1)
 #define TWL6030_GPADCR BIT(0)
 
+#define USB_VBUS_CTRL_SET 0x04
+#define USB_ID_CTRL_SET 0x06
+
+#define TWL6030_MISC1 0xE4
+#define VBUS_MEAS 0x01
+#define ID_MEAS 0x01
+
+#define VAC_MEAS 0x04
+#define VBAT_MEAS 0x02
+#define BB_MEAS 0x01
+
+
 /**
  * struct twl6030_chnl_calib - channel calibration
  * @gain: slope coefficient for ideal curve
@@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
         return ret;
     }
 
+    ret = twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET);
+    if (ret < 0) {
+        dev_err(dev, "failed to wire up inputs\n");
+        return ret;
+    }
+
+    ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET);
+    if (ret < 0) {
+        dev_err(dev, "failed to wire up inputs\n");
+        return ret;
+    }
+
+    ret = twl_i2c_write_u8(TWL6030_MODULE_ID0,
+                           VBAT_MEAS | BB_MEAS | VAC_MEAS,
+                           TWL6030_MISC1);
+    if (ret < 0) {
+        dev_err(dev, "failed to wire up inputs\n");
+        return ret;
+    }
+
     indio_dev->name = DRIVER_NAME;
     indio_dev->info = &twl6030_gpadc_iio_info;
     indio_dev->modes = INDIO_DIRECT_MODE;
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1329,7 +1329,7 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
 
     dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
     if (!dev_channels)
-        ret = -ENOMEM;
+        return -ENOMEM;
 
     indio_dev->channels = dev_channels;
     indio_dev->num_channels = num_channels;
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -231,6 +231,7 @@ static int gyro_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
         gyro_state->timestamp =
             hid_sensor_convert_timestamp(&gyro_state->common_attributes,
                              *(s64 *)raw_data);
+        ret = 0;
         break;
     default:
         break;
--- a/drivers/iio/imu/fxos8700_core.c
+++ b/drivers/iio/imu/fxos8700_core.c
@@ -10,6 +10,7 @@
 #include <linux/regmap.h>
 #include <linux/acpi.h>
 #include <linux/bitops.h>
+#include <linux/bitfield.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -144,9 +145,8 @@
 #define FXOS8700_NVM_DATA_BNK0      0xa7
 
 /* Bit definitions for FXOS8700_CTRL_REG1 */
-#define FXOS8700_CTRL_ODR_MSK       0x38
 #define FXOS8700_CTRL_ODR_MAX       0x00
-#define FXOS8700_CTRL_ODR_MIN       GENMASK(4, 3)
+#define FXOS8700_CTRL_ODR_MSK       GENMASK(5, 3)
 
 /* Bit definitions for FXOS8700_M_CTRL_REG1 */
 #define FXOS8700_HMS_MASK           GENMASK(1, 0)
@@ -320,7 +320,7 @@ static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type)
     switch (iio_type) {
     case IIO_ACCEL:
         return FXOS8700_ACCEL;
-    case IIO_ANGL_VEL:
     case IIO_MAGN:
         return FXOS8700_MAGN;
     default:
         return -EINVAL;
@@ -345,15 +345,35 @@ static int fxos8700_set_active_mode(struct fxos8700_data *data,
 static int fxos8700_set_scale(struct fxos8700_data *data,
                               enum fxos8700_sensor t, int uscale)
 {
-    int i;
+    int i, ret, val;
+    bool active_mode;
     static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
     struct device *dev = regmap_get_device(data->regmap);
 
     if (t == FXOS8700_MAGN) {
-        dev_err(dev, "Magnetometer scale is locked at 1200uT\n");
+        dev_err(dev, "Magnetometer scale is locked at 0.001Gs\n");
         return -EINVAL;
     }
 
+    /*
+     * When the device is in active mode, setting an ACCEL full-scale
+     * range (2g/4g/8g) in FXOS8700_XYZ_DATA_CFG fails. This does not
+     * align with the datasheet, but it is the fxos8700 chip's behavior.
+     * Put the device into standby mode before setting an ACCEL
+     * full-scale range.
+     */
+    ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+    if (ret)
+        return ret;
+
+    active_mode = val & FXOS8700_ACTIVE;
+    if (active_mode) {
+        ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+                           val & ~FXOS8700_ACTIVE);
+        if (ret)
+            return ret;
+    }
+
     for (i = 0; i < scale_num; i++)
         if (fxos8700_accel_scale[i].uscale == uscale)
             break;
@@ -361,8 +381,12 @@ static int fxos8700_set_scale(struct fxos8700_data *data,
     if (i == scale_num)
         return -EINVAL;
 
-    return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
+    ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
                         fxos8700_accel_scale[i].bits);
+    if (ret)
+        return ret;
+    return regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+                        active_mode);
 }
 
 static int fxos8700_get_scale(struct fxos8700_data *data,
@@ -372,7 +396,7 @@ static int fxos8700_get_scale(struct fxos8700_data *data,
     static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
 
     if (t == FXOS8700_MAGN) {
-        *uscale = 1200; /* Magnetometer is locked at 1200uT */
+        *uscale = 1000; /* Magnetometer is locked at 0.001Gs */
         return 0;
     }
 
@@ -394,22 +418,61 @@ static int fxos8700_get_data(struct fxos8700_data *data, int chan_type,
                              int axis, int *val)
 {
     u8 base, reg;
+    s16 tmp;
     int ret;
-    enum fxos8700_sensor type = fxos8700_to_sensor(chan_type);
 
-    base = type ? FXOS8700_OUT_X_MSB : FXOS8700_M_OUT_X_MSB;
+    /*
+     * Register base addresses vary with the channel type. This bug
+     * was not noticed before because the enum-based selection was
+     * hard to read. Use a switch statement instead.
+     */
+    switch (chan_type) {
+    case IIO_ACCEL:
+        base = FXOS8700_OUT_X_MSB;
+        break;
+    case IIO_MAGN:
+        base = FXOS8700_M_OUT_X_MSB;
+        break;
+    default:
+        return -EINVAL;
+    }
 
     /* Block read 6 bytes of device output registers to avoid data loss */
     ret = regmap_bulk_read(data->regmap, base, data->buf,
-                           FXOS8700_DATA_BUF_SIZE);
+                           sizeof(data->buf));
     if (ret)
         return ret;
 
     /* Convert axis to buffer index */
     reg = axis - IIO_MOD_X;
 
+    /*
+     * Convert to native endianness. The accel data and magn data
+     * are signed, so a forced type conversion is needed.
+     */
+    tmp = be16_to_cpu(data->buf[reg]);
+
+    /*
+     * ACCEL output data registers contain the X-axis, Y-axis, and Z-axis
+     * 14-bit left-justified sample data, and MAGN output data registers
+     * contain the X-axis, Y-axis, and Z-axis 16-bit sample data. Apply
+     * a signed 2-bit right shift to the raw data read back from the
+     * ACCEL output data registers, and keep the MAGN data as-is. The
+     * value is then sign-extended to 32 bits.
+     */
+    switch (chan_type) {
+    case IIO_ACCEL:
+        tmp = tmp >> 2;
+        break;
+    case IIO_MAGN:
+        /* Nothing to do */
+        break;
+    default:
+        return -EINVAL;
+    }
 
-    /* Convert to native endianness */
-    *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15);
+    *val = sign_extend32(tmp, 15);
 
     return 0;
 }
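Note: the shift added above exists because ACCEL samples are 14-bit values
left-justified in a big-endian 16-bit register, while MAGN samples use all 16
bits. Below is a small standalone C sketch of that conversion; the helper is an
illustration of the arithmetic only, not the driver's code path.

#include <stdint.h>
#include <stdio.h>

/* Convert a big-endian 16-bit accel sample to a signed native value:
 * byte-swap (as a little-endian CPU would need), then drop the two
 * left-justified padding bits with a sign-preserving arithmetic shift. */
static int32_t accel_sample_to_native(uint16_t raw_as_read_le)
{
    int16_t tmp = (int16_t)((uint16_t)(raw_as_read_le >> 8) |
                            (uint16_t)(raw_as_read_le << 8));

    return tmp >> 2;    /* arithmetic shift on Linux targets keeps the sign */
}

int main(void)
{
    /* bytes fe 60 on the wire read as 0x60fe on LE; -416 raw, -104 shifted */
    printf("%d\n", accel_sample_to_native(0x60fe));
    return 0;
}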
@@ -445,10 +508,9 @@ static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
     if (i >= odr_num)
         return -EINVAL;
 
-    return regmap_update_bits(data->regmap,
-                              FXOS8700_CTRL_REG1,
-                              FXOS8700_CTRL_ODR_MSK + FXOS8700_ACTIVE,
-                              fxos8700_odr[i].bits << 3 | active_mode);
+    val &= ~FXOS8700_CTRL_ODR_MSK;
+    val |= FIELD_PREP(FXOS8700_CTRL_ODR_MSK, fxos8700_odr[i].bits) | FXOS8700_ACTIVE;
+    return regmap_write(data->regmap, FXOS8700_CTRL_REG1, val);
 }
 
 static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
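Note: the rework above replaces an open-coded "<< 3" with FIELD_PREP against
FXOS8700_CTRL_ODR_MSK (GENMASK(5, 3)). Below is a hedged plain-C re-creation of
what the FIELD_PREP/FIELD_GET helpers do, for illustration only; the kernel
versions live in <linux/bitfield.h> and differ in implementation.

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))
#define ODR_MSK         GENMASK32(5, 3)   /* 0x38 */
#define ACTIVE          0x01

/* Like FIELD_PREP: shift a value up to the mask's lowest set bit. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
    return (val << __builtin_ctz(mask)) & mask;
}

/* Like FIELD_GET: extract the field and right-justify it. */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
    return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
    uint32_t reg = field_prep(ODR_MSK, 0x5) | ACTIVE;

    printf("reg = 0x%02x, odr bits = 0x%x\n", reg, field_get(ODR_MSK, reg));
    return 0;
}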
@@ -461,7 +523,7 @@ static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
     if (ret)
         return ret;
 
-    val &= FXOS8700_CTRL_ODR_MSK;
+    val = FIELD_GET(FXOS8700_CTRL_ODR_MSK, val);
 
     for (i = 0; i < odr_num; i++)
         if (val == fxos8700_odr[i].bits)
@@ -526,7 +588,7 @@ static IIO_CONST_ATTR(in_accel_sampling_frequency_available,
 static IIO_CONST_ATTR(in_magn_sampling_frequency_available,
                       "1.5625 6.25 12.5 50 100 200 400 800");
 static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976");
-static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200");
+static IIO_CONST_ATTR(in_magn_scale_available, "0.001000");
 
 static struct attribute *fxos8700_attrs[] = {
     &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
@@ -592,14 +654,19 @@ static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi)
     if (ret)
         return ret;
 
-    /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
-    ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
-                       FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE);
+    /*
+     * Set max full-scale range (+/-8G) for ACCEL sensor in chip
+     * initialization then activate the device.
+     */
+    ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
     if (ret)
         return ret;
 
-    /* Set for max full-scale range (+/-8G) */
-    return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
+    /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
+    return regmap_update_bits(data->regmap, FXOS8700_CTRL_REG1,
+                              FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE,
+                              FIELD_PREP(FXOS8700_CTRL_ODR_MSK, FXOS8700_CTRL_ODR_MAX) |
+                              FXOS8700_ACTIVE);
 }
 
 static void fxos8700_chip_uninit(void *data)
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -4,6 +4,7 @@ config IIO_ST_LSM6DSX
 	tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors"
 	depends on (I2C || SPI || I3C)
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
+	select IIO_KFIFO_BUF
 	select IIO_ST_LSM6DSX_I2C if (I2C)
 	select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -440,6 +440,8 @@ static int cm32181_probe(struct i2c_client *client)
     if (!indio_dev)
         return -ENOMEM;
 
+    i2c_set_clientdata(client, indio_dev);
+
     /*
      * Some ACPI systems list 2 I2C resources for the CM3218 sensor, the
      * SMBus Alert Response Address (ARA, 0x0c) and the actual I2C address.
@@ -460,8 +462,6 @@ static int cm32181_probe(struct i2c_client *client)
             return PTR_ERR(client);
     }
 
-    i2c_set_clientdata(client, indio_dev);
-
     cm32181 = iio_priv(indio_dev);
     cm32181->client = client;
     cm32181->dev = dev;
@@ -490,7 +490,8 @@
 
 static int cm32181_suspend(struct device *dev)
 {
-    struct i2c_client *client = to_i2c_client(dev);
+    struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+    struct i2c_client *client = cm32181->client;
 
     return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
                                      CM32181_CMD_ALS_DISABLE);
@@ -498,8 +499,8 @@ static int cm32181_suspend(struct device *dev)
 
 static int cm32181_resume(struct device *dev)
 {
-    struct i2c_client *client = to_i2c_client(dev);
+    struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+    struct i2c_client *client = cm32181->client;
 
     return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
                                      cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
 
     d = debugfs_rename(bonding_debug_root, bond->debug_dir,
                        bonding_debug_root, bond->dev->name);
-    if (d) {
+    if (!IS_ERR(d)) {
         bond->debug_dir = d;
     } else {
         netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1302,14 +1302,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
         if (!priv->ports[port].pvid)
             mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
                        MT7530_VLAN_ACC_TAGGED);
-    }
 
-    /* Set the port as a user port which is to be able to recognize VID
-     * from incoming packets before fetching entry within the VLAN table.
-     */
-    mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
-               VLAN_ATTR(MT7530_VLAN_USER) |
-               PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+        /* Set the port as a user port which is to be able to recognize
+         * VID from incoming packets before fetching entry within the
+         * VLAN table.
+         */
+        mt7530_rmw(priv, MT7530_PVC_P(port),
+                   VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+                   VLAN_ATTR(MT7530_VLAN_USER) |
+                   PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+    } else {
+        /* Also set CPU ports to the "user" VLAN port attribute, to
+         * allow VLAN classification, but keep the EG_TAG attribute as
+         * "consistent" (i.o.w. don't change its value) for packets
+         * received by the switch from the CPU, so that tagged packets
+         * are forwarded to user ports as tagged, and untagged as
+         * untagged.
+         */
+        mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+                   VLAN_ATTR(MT7530_VLAN_USER));
+    }
 }
 
 static void
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4684,25 +4684,26 @@ static int init_reset_optional(struct platform_device *pdev)
         if (ret)
             return dev_err_probe(&pdev->dev, ret,
                                  "failed to init SGMII PHY\n");
-    }
 
-    ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
-    if (!ret) {
-        u32 pm_info[2];
+        ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+        if (!ret) {
+            u32 pm_info[2];
 
-        ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
-                                         pm_info, ARRAY_SIZE(pm_info));
-        if (ret) {
-            dev_err(&pdev->dev, "Failed to read power management information\n");
-            goto err_out_phy_exit;
+            ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+                                             pm_info, ARRAY_SIZE(pm_info));
+            if (ret) {
+                dev_err(&pdev->dev, "Failed to read power management information\n");
+                goto err_out_phy_exit;
+            }
+            ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+            if (ret)
+                goto err_out_phy_exit;
+
+            ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+            if (ret)
+                goto err_out_phy_exit;
         }
-        ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
-        if (ret)
-            goto err_out_phy_exit;
-
-        ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
-        if (ret)
-            goto err_out_phy_exit;
     }
 
     /* Fully reset controller at hardware level if mapped in device tree */
Some files were not shown because too many files have changed in this diff.