Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 5b91c5cc0e

@@ -100,6 +100,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2077057        | ARM64_ERRATUM_2077057       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |

@@ -242,7 +242,7 @@ example:

    int rectangle_area(struct shape *this)
    {
-       struct rectangle *self = container_of(this, struct shape, parent);
+       struct rectangle *self = container_of(this, struct rectangle, parent);

        return self->length * self->width;
    };
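
The fix above matters because container_of() computes the containing structure from the offset of the named member, so the type argument must be the containing type. A minimal self-contained sketch of the pattern, with hypothetical struct layouts standing in for the documentation's full example::

    #include <stddef.h>

    /* container_of(): recover the containing struct from a member pointer. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct shape {
            int (*area)(struct shape *this);
    };

    struct rectangle {
            struct shape parent;    /* embedded base object */
            int length;
            int width;
    };

    static int rectangle_area(struct shape *this)
    {
            /* Third argument names the *containing* type, hence the fix. */
            struct rectangle *self = container_of(this, struct rectangle, parent);

            return self->length * self->width;
    }

With struct shape as the type argument the example would not even compile, since struct shape has no member named parent.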

@@ -462,6 +462,10 @@ operation table looks like the following::

                       struct iov_iter *iter,
                       netfs_io_terminated_t term_func,
                       void *term_func_priv);

        int (*query_occupancy)(struct netfs_cache_resources *cres,
                               loff_t start, size_t len, size_t granularity,
                               loff_t *_data_start, size_t *_data_len);
    };

With a termination handler function pointer::

@@ -536,6 +540,18 @@ The methods defined in the table are:

   indicating whether the termination is definitely happening in the caller's
   context.

 * ``query_occupancy()``

   [Required] Called to find out where the next piece of data is within a
   particular region of the cache.  The start and length of the region to be
   queried are passed in, along with the granularity to which the answer needs
   to be aligned.  The function passes back the start and length of the data,
   if any, available within that region.  Note that there may be a hole at the
   front.

   It returns 0 if some data was found, -ENODATA if there was no usable data
   within the region or -ENOBUFS if there is no caching on this file.

Note that these methods are passed a pointer to the cache resource structure,
not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
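
As a rough illustration of the contract described in the hunk above, here is a minimal sketch of a cache backend's query_occupancy() implementation. The extent-lookup helper and the use of cache_priv are hypothetical placeholders; only the signature and the 0 / -ENODATA / -ENOBUFS convention come from the documentation::

    /* Hypothetical helper: find the first cached extent overlapping
     * [start, start + len), returning false if there is none. */
    static bool my_find_extent(struct netfs_cache_resources *cres,
                               loff_t start, size_t len,
                               loff_t *ext_start, size_t *ext_len);

    static int my_query_occupancy(struct netfs_cache_resources *cres,
                                  loff_t start, size_t len, size_t granularity,
                                  loff_t *_data_start, size_t *_data_len)
    {
            loff_t ext_start;
            size_t ext_len;

            if (!cres->cache_priv)          /* no caching on this file */
                    return -ENOBUFS;

            if (!my_find_extent(cres, start, len, &ext_start, &ext_len))
                    return -ENODATA;

            /* Align the answer to the requested granularity; the data may
             * begin after 'start', i.e. there can be a hole at the front. */
            *_data_start = round_up(ext_start, granularity);
            *_data_len = round_down(ext_start + ext_len, granularity) -
                         *_data_start;
            return *_data_len ? 0 : -ENODATA;
    }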

@@ -300,30 +300,6 @@ Contact: Daniel Vetter, Noralf Tronnes

 Level: Advanced

-Garbage collect fbdev scrolling acceleration
---------------------------------------------
-
-Scroll acceleration has been disabled in fbcon. Now it works as the old
-SCROLL_REDRAW mode. A ton of code was removed in fbcon.c and the hook bmove was
-removed from fbcon_ops.
-Remaining tasks:
-
-- a bunch of the hooks in fbcon_ops could be removed or simplified by calling
-  directly instead of the function table (with a switch on p->rotate)
-
-- fb_copyarea is unused after this, and can be deleted from all drivers
-
-- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as
-  well as cfb_copyarea
-
-Note that not all acceleration code can be deleted, since clearing and cursor
-support is still accelerated, which might be good candidates for further
-deletion projects.
-
-Contact: Daniel Vetter
-
-Level: Intermediate
-
 idr_init_base()
 ---------------

@@ -115,6 +115,7 @@ Code  Seq#    Include File                  Comments
'B'   00-1F  linux/cciss_ioctl.h            conflict!
'B'   00-0F  include/linux/pmu.h            conflict!
'B'   C0-FF  advanced bbus                  <mailto:maassen@uni-freiburg.de>
+'B'   00-0F  xen/xenbus_dev.h               conflict!
'C'   all    linux/soundcard.h              conflict!
'C'   01-2F  linux/capi.h                   conflict!
'C'   F0-FF  drivers/net/wan/cosa.h         conflict!

@@ -134,6 +135,7 @@ Code  Seq#    Include File                  Comments
'F'   80-8F  linux/arcfb.h                  conflict!
'F'   DD     video/sstfb.h                  conflict!
'G'   00-3F  drivers/misc/sgi-gru/grulib.h  conflict!
+'G'   00-0F  xen/gntalloc.h, xen/gntdev.h   conflict!
'H'   00-7F  linux/hiddev.h                 conflict!
'H'   00-0F  linux/hidraw.h                 conflict!
'H'   01     linux/mei.h                    conflict!

@@ -176,6 +178,7 @@ Code  Seq#    Include File                  Comments
'P'   60-6F  sound/sscape_ioctl.h           conflict!
'P'   00-0F  drivers/usb/class/usblp.c      conflict!
'P'   01-09  drivers/misc/pci_endpoint_test.c  conflict!
+'P'   00-0F  xen/privcmd.h                  conflict!
'Q'   all    linux/soundcard.h
'R'   00-1F  linux/random.h                 conflict!
'R'   01     linux/rfkill.h                 conflict!

MAINTAINERS

@@ -10881,6 +10881,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F:  drivers/ata/pata_arasan_cf.c
F:  include/linux/pata_arasan_cf_data.h

LIBATA PATA DRIVERS
R:  Sergey Shtylyov <s.shtylyov@omp.ru>
L:  linux-ide@vger.kernel.org
F:  drivers/ata/ata_*.c
F:  drivers/ata/pata_*.c

LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
M:  Linus Walleij <linus.walleij@linaro.org>
L:  linux-ide@vger.kernel.org

@@ -12401,7 +12407,7 @@ F: include/uapi/linux/membarrier.h
F:  kernel/sched/membarrier.c

MEMBLOCK
-M:  Mike Rapoport <rppt@linux.ibm.com>
+M:  Mike Rapoport <rppt@kernel.org>
L:  linux-mm@kvack.org
S:  Maintained
F:  Documentation/core-api/boot-time-mm.rst

@@ -13299,8 +13305,8 @@ W: http://www.iptables.org/
W:  http://www.nftables.org/
Q:  http://patchwork.ozlabs.org/project/netfilter-devel/list/
C:  irc://irc.libera.chat/netfilter
-T:  git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
-T:  git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
+T:  git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
+T:  git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next.git
F:  include/linux/netfilter*
F:  include/linux/netfilter/
F:  include/net/netfilter/

@@ -13567,7 +13573,7 @@ F: tools/testing/selftests/nci/

NFS, SUNRPC, AND LOCKD CLIENTS
M:  Trond Myklebust <trond.myklebust@hammerspace.com>
-M:  Anna Schumaker <anna.schumaker@netapp.com>
+M:  Anna Schumaker <anna@kernel.org>
L:  linux-nfs@vger.kernel.org
S:  Maintained
W:  http://client.linux-nfs.org

@@ -16469,6 +16475,14 @@ F: Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml
F:  drivers/i2c/busses/i2c-rcar.c
F:  drivers/i2c/busses/i2c-sh_mobile.c

RENESAS R-CAR SATA DRIVER
R:  Sergey Shtylyov <s.shtylyov@omp.ru>
S:  Supported
L:  linux-ide@vger.kernel.org
L:  linux-renesas-soc@vger.kernel.org
F:  Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
F:  drivers/ata/sata_rcar.c

RENESAS R-CAR THERMAL DRIVERS
M:  Niklas Söderlund <niklas.soderlund@ragnatech.se>
L:  linux-renesas-soc@vger.kernel.org

Makefile

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Gobble Gobble

# *DOCUMENTATION*

@@ -13,12 +13,12 @@
static int crypto_blake2s_update_arm(struct shash_desc *desc,
                                     const u8 *in, unsigned int inlen)
{
-       return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+       return crypto_blake2s_update(desc, in, inlen, false);
}

static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
-       return crypto_blake2s_final(desc, out, blake2s_compress);
+       return crypto_blake2s_final(desc, out, false);
}

#define BLAKE2S_ALG(name, driver_name, digest_size) \
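
The same change repeats in the x86 and generic BLAKE2s glue later in this diff: the shash helpers stop taking a pointer to the compression function and instead take a bool selecting the generic compressor (false in the arch glue here, true in the generic glue). A sketch of the dispatch this implies inside the shared helper; the helper name and body are assumptions, only the bool convention is taken from the diff, and the likely motive is avoiding indirect calls, which interact badly with CFI and retpolines::

    /* Sketch: select the compressor with a branch instead of an
     * indirect call; blake2s_compress() is the arch-optimized entry. */
    static inline void blake2s_do_compress(struct blake2s_state *state,
                                           const u8 *block, size_t nblocks,
                                           u32 inc, bool force_generic)
    {
            if (force_generic)
                    blake2s_compress_generic(state, block, nblocks, inc);
            else
                    blake2s_compress(state, block, nblocks, inc);
    }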

@@ -680,6 +680,22 @@ config ARM64_ERRATUM_2051678

          If unsure, say Y.

config ARM64_ERRATUM_2077057
        bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2"
        help
          This option adds the workaround for ARM Cortex-A510 erratum 2077057.
          Affected Cortex-A510 may corrupt SPSR_EL2 when a step exception is
          expected, but a Pointer Authentication trap is taken instead. The
          erratum causes SPSR_EL1 to be copied to SPSR_EL2, which could allow
          EL1 to cause a return to EL2 with a guest controlled ELR_EL2.

          This can only happen when EL2 is stepping EL1.

          When these conditions occur, the SPSR_EL2 value is unchanged from the
          previous guest entry, and can be restored from the in-memory copy.

          If unsure, say Y.

config ARM64_ERRATUM_2119858
        bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
        default y

@@ -600,6 +600,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
        {
                .desc = "ARM erratum 2077057",
                .capability = ARM64_WORKAROUND_2077057,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
        {
                .desc = "ARM erratum 2064142",

@@ -797,6 +797,24 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
                xfer_to_guest_mode_work_pending();
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
        int ret;

        guest_state_enter_irqoff();
        ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
        guest_state_exit_irqoff();

        return ret;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu: The VCPU pointer

@@ -881,9 +899,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * Enter the guest
                 */
                trace_kvm_entry(*vcpu_pc(vcpu));
-               guest_enter_irqoff();
+               guest_timing_enter_irqoff();

-               ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+               ret = kvm_arm_vcpu_enter_exit(vcpu);

                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;

@@ -918,26 +936,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                kvm_arch_vcpu_ctxsync_fp(vcpu);

                /*
-                * We may have taken a host interrupt in HYP mode (ie
-                * while executing the guest). This interrupt is still
-                * pending, as we haven't serviced it yet!
+                * We must ensure that any pending interrupts are taken before
+                * we exit guest timing so that timer ticks are accounted as
+                * guest time. Transiently unmask interrupts so that any
+                * pending interrupts are taken.
                 *
-                * We're now back in SVC mode, with interrupts
-                * disabled. Enabling the interrupts now will have
-                * the effect of taking the interrupt again, in SVC
-                * mode this time.
+                * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
+                * context synchronization event) is necessary to ensure that
+                * pending interrupts are taken.
                 */
                local_irq_enable();
+               isb();
+               local_irq_disable();
+
+               guest_timing_exit_irqoff();
+
+               local_irq_enable();

-               /*
-                * We do local_irq_enable() before calling guest_exit() so
-                * that if a timer interrupt hits while running the guest we
-                * account that tick as being spent in the guest. We enable
-                * preemption after calling guest_exit() so that if we get
-                * preempted we make sure ticks after that is not counted as
-                * guest time.
-                */
-               guest_exit();
                trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

                /* Exit types that need handling before we can be preempted */
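
The MIPS and RISC-V hunks below make the same conversion, so the shape is worth seeing once. A schematic of the entry/exit ordering all three architectures converge on; the guest_timing_* and guest_state_* helpers are the real generic API visible in these hunks, while arch_run_guest() is a placeholder for the architecture's hypervisor call::

    /* Schematic vcpu-run skeleton; interrupts are disabled on entry. */
    guest_timing_enter_irqoff();        /* start charging time to the guest */

    /* noinstr region: RCU is in an extended quiescent state (EQS) */
    guest_state_enter_irqoff();
    ret = arch_run_guest(vcpu);         /* placeholder for the hyp call */
    guest_state_exit_irqoff();

    /* Transiently unmask interrupts so a pending timer tick is taken
     * while still inside guest timing, then stop the guest clock. */
    local_irq_enable();
    local_irq_disable();
    guest_timing_exit_irqoff();
    local_irq_enable();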

@@ -228,6 +228,14 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
        struct kvm_run *run = vcpu->run;

        if (ARM_SERROR_PENDING(exception_index)) {
                /*
                 * The SError is handled by handle_exit_early(). If the guest
                 * survives it will re-execute the original instruction.
                 */
                return 1;
        }

        exception_index = ARM_EXCEPTION_CODE(exception_index);

        switch (exception_index) {

@@ -402,6 +402,24 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
        return false;
}

static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
{
        /*
         * Check for the conditions of Cortex-A510's #2077057. When these occur
         * SPSR_EL2 can't be trusted, but isn't needed either as it is
         * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
         * Are we single-stepping the guest, and took a PAC exception from the
         * active-not-pending state?
         */
        if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
            vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
            *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
            ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
                write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

        vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the

@@ -413,7 +431,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
         * Save PSTATE early so that we can evaluate the vcpu mode
         * early on.
         */
-       vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+       synchronize_vcpu_pstate(vcpu, exit_code);

        /*
         * Check whether we want to repaint the state one way or

@@ -424,7 +442,8 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

-       if (ARM_SERROR_PENDING(*exit_code)) {
+       if (ARM_SERROR_PENDING(*exit_code) &&
+           ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
                u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

                /*

@@ -55,9 +55,10 @@ WORKAROUND_1418040
WORKAROUND_1463225
WORKAROUND_1508412
WORKAROUND_1542419
-WORKAROUND_2064142
-WORKAROUND_2038923
WORKAROUND_1902691
+WORKAROUND_2038923
+WORKAROUND_2064142
+WORKAROUND_2077057
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE

@@ -83,6 +83,8 @@
                label = "HDMI OUT";
                type = "a";

+               ddc-en-gpios = <&gpa 25 GPIO_ACTIVE_HIGH>;
+
                port {
                        hdmi_con: endpoint {
                                remote-endpoint = <&dw_hdmi_out>;

@@ -114,17 +116,6 @@
                gpio = <&gpf 14 GPIO_ACTIVE_LOW>;
                enable-active-high;
        };
-
-       hdmi_power: fixedregulator@3 {
-               compatible = "regulator-fixed";
-
-               regulator-name = "hdmi_power";
-               regulator-min-microvolt = <5000000>;
-               regulator-max-microvolt = <5000000>;
-
-               gpio = <&gpa 25 0>;
-               enable-active-high;
-       };
};

&ext {

@@ -576,8 +567,6 @@
        pinctrl-names = "default";
        pinctrl-0 = <&pins_hdmi_ddc>;

-       hdmi-5v-supply = <&hdmi_power>;
-
        ports {
                #address-cells = <1>;
                #size-cells = <0>;

@@ -414,6 +414,24 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
        return -ENOIOCTLCMD;
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
        int ret;

        guest_state_enter_irqoff();
        ret = kvm_mips_callbacks->vcpu_run(vcpu);
        guest_state_exit_irqoff();

        return ret;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int r = -EINTR;

@@ -434,7 +452,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        lose_fpu(1);

        local_irq_disable();
-       guest_enter_irqoff();
+       guest_timing_enter_irqoff();
        trace_kvm_enter(vcpu);

        /*

@@ -445,10 +463,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
         */
        smp_store_mb(vcpu->mode, IN_GUEST_MODE);

-       r = kvm_mips_callbacks->vcpu_run(vcpu);
+       r = kvm_mips_vcpu_enter_exit(vcpu);
+
+       /*
+        * We must ensure that any pending interrupts are taken before
+        * we exit guest timing so that timer ticks are accounted as
+        * guest time. Transiently unmask interrupts so that any
+        * pending interrupts are taken.
+        *
+        * TODO: is there a barrier which ensures that pending interrupts are
+        * recognised? Currently this just hopes that the CPU takes any pending
+        * interrupts between the enable and disable.
+        */
+       local_irq_enable();
+       local_irq_disable();

        trace_kvm_out(vcpu);
-       guest_exit_irqoff();
+       guest_timing_exit_irqoff();
        local_irq_enable();

out:

@@ -1168,7 +1199,7 @@ static void kvm_mips_set_c0_status(void)
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
-int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 cause = vcpu->arch.host_cp0_cause;

@@ -1357,6 +1388,17 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
        return ret;
}

int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
        int ret;

        guest_state_exit_irqoff();
        ret = __kvm_mips_handle_exit(vcpu);
        guest_state_enter_irqoff();

        return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{

@@ -90,6 +90,7 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *cntx;
+       struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;

        /* Mark this VCPU never ran */
        vcpu->arch.ran_atleast_once = false;

@@ -106,6 +107,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        cntx->hstatus |= HSTATUS_SPVP;
        cntx->hstatus |= HSTATUS_SPV;

+       /* By default, make CY, TM, and IR counters accessible in VU mode */
+       reset_csr->scounteren = 0x7;
+
        /* Setup VCPU timer */
        kvm_riscv_vcpu_timer_init(vcpu);

@@ -699,6 +703,20 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
        csr_write(CSR_HVIP, csr->hvip);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
        guest_state_enter_irqoff();
        __kvm_riscv_switch_to(&vcpu->arch);
        guest_state_exit_irqoff();
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int ret;

@@ -790,9 +808,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                        continue;
                }

-               guest_enter_irqoff();
+               guest_timing_enter_irqoff();

-               __kvm_riscv_switch_to(&vcpu->arch);
+               kvm_riscv_vcpu_enter_exit(vcpu);

                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;

@@ -812,25 +830,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                kvm_riscv_vcpu_sync_interrupts(vcpu);

                /*
-                * We may have taken a host interrupt in VS/VU-mode (i.e.
-                * while executing the guest). This interrupt is still
-                * pending, as we haven't serviced it yet!
+                * We must ensure that any pending interrupts are taken before
+                * we exit guest timing so that timer ticks are accounted as
+                * guest time. Transiently unmask interrupts so that any
+                * pending interrupts are taken.
                 *
-                * We're now back in HS-mode with interrupts disabled
-                * so enabling the interrupts now will have the effect
-                * of taking the interrupt again, in HS-mode this time.
+                * There's no barrier which ensures that pending interrupts are
+                * recognised, so we just hope that the CPU takes any pending
+                * interrupts between the enable and disable.
                 */
                local_irq_enable();
+               local_irq_disable();

-               /*
-                * We do local_irq_enable() before calling guest_exit() so
-                * that if a timer interrupt hits while running the guest
-                * we account that tick as being spent in the guest. We
-                * enable preemption after calling guest_exit() so that if
-                * we get preempted we make sure ticks after that is not
-                * counted as guest time.
-                */
-               guest_exit();
+               guest_timing_exit_irqoff();
+
+               local_irq_enable();

                preempt_enable();

@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/version.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>

@@ -32,7 +33,7 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                *out_val = KVM_SBI_IMPID;
                break;
        case SBI_EXT_BASE_GET_IMP_VERSION:
-               *out_val = 0;
+               *out_val = LINUX_VERSION_CODE;
                break;
        case SBI_EXT_BASE_PROBE_EXT:
                if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START &&

@@ -4667,6 +4667,8 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
                return -EINVAL;
        if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
                return -E2BIG;
+       if (!kvm_s390_pv_cpu_is_protected(vcpu))
+               return -EINVAL;

        switch (mop->op) {
        case KVM_S390_MEMOP_SIDA_READ:

@@ -18,12 +18,12 @@
static int crypto_blake2s_update_x86(struct shash_desc *desc,
                                     const u8 *in, unsigned int inlen)
{
-       return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+       return crypto_blake2s_update(desc, in, inlen, false);
}

static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
-       return crypto_blake2s_final(desc, out, blake2s_compress);
+       return crypto_blake2s_final(desc, out, false);
}

#define BLAKE2S_ALG(name, driver_name, digest_size) \

@@ -4703,6 +4703,19 @@ static __initconst const struct x86_pmu intel_pmu = {
        .lbr_read               = intel_pmu_lbr_read_64,
        .lbr_save               = intel_pmu_lbr_save,
        .lbr_restore            = intel_pmu_lbr_restore,

        /*
         * SMM has access to all 4 rings and while traditionally SMM code only
         * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
         *
         * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
         * between SMM or not, this results in what should be pure userspace
         * counters including SMM data.
         *
         * This is a clear privilege issue, therefore globally disable
         * counting SMM by default.
         */
        .attr_freeze_on_smi     = 1,
};

static __init void intel_clovertown_quirk(void)

@@ -897,8 +897,9 @@ static void pt_handle_status(struct pt *pt)
                 * means we are already losing data; need to let the decoder
                 * know.
                 */
-               if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
-                   buf->output_off == pt_buffer_region_size(buf)) {
+               if (!buf->single &&
+                   (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
+                    buf->output_off == pt_buffer_region_size(buf))) {
                        perf_aux_output_flag(&pt->handle,
                                             PERF_AUX_FLAG_TRUNCATED);
                        advance++;

@@ -82,7 +82,7 @@ KVM_X86_OP_NULL(guest_apic_has_interrupt)
KVM_X86_OP(load_eoi_exitmap)
KVM_X86_OP(set_virtual_apic_mode)
KVM_X86_OP_NULL(set_apic_access_page_addr)
-KVM_X86_OP(deliver_posted_interrupt)
+KVM_X86_OP(deliver_interrupt)
KVM_X86_OP_NULL(sync_pir_to_irr)
KVM_X86_OP(set_tss_addr)
KVM_X86_OP(set_identity_map_addr)

@@ -1410,7 +1410,8 @@ struct kvm_x86_ops {
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
-       int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+       void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
+                                 int trig_mode, int vector);
        int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);

@@ -43,20 +43,6 @@ static inline uint32_t xen_cpuid_base(void)
        return hypervisor_cpuid_base("XenVMMXenVMM", 2);
}

-#ifdef CONFIG_XEN
-extern bool __init xen_hvm_need_lapic(void);
-
-static inline bool __init xen_x2apic_para_available(void)
-{
-       return xen_hvm_need_lapic();
-}
-#else
-static inline bool __init xen_x2apic_para_available(void)
-{
-       return (xen_cpuid_base() != 0);
-}
-#endif
-
struct pci_dev;

#ifdef CONFIG_XEN_PV_DOM0

@@ -554,12 +554,13 @@ void kvm_set_cpu_caps(void)
        );

        kvm_cpu_cap_mask(CPUID_7_0_EBX,
-               F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
-               F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
-               F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
-               F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
-               F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
-       );
+               F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
+               F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
+               F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
+               F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
+               F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
+               F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
+               F(AVX512VL));

        kvm_cpu_cap_mask(CPUID_7_ECX,
                F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |

@@ -1096,14 +1096,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                                                       apic->regs + APIC_TMR);
                }

-               if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
-                       kvm_lapic_set_irr(vector, apic);
-                       kvm_make_request(KVM_REQ_EVENT, vcpu);
-                       kvm_vcpu_kick(vcpu);
-               } else {
-                       trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
-                                                  trig_mode, vector);
-               }
+               static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
+                                                      trig_mode, vector);
                break;

        case APIC_DM_REMRD:

@@ -3291,6 +3291,21 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
                SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
                                  int trig_mode, int vector)
{
        struct kvm_vcpu *vcpu = apic->vcpu;

        if (svm_deliver_avic_intr(vcpu, vector)) {
                kvm_lapic_set_irr(vector, apic);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
        } else {
                trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
                                           trig_mode, vector);
        }
}

static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
        struct vcpu_svm *svm = to_svm(vcpu);

@@ -3615,7 +3630,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned long vmcb_pa = svm->current_vmcb->pa;

-       kvm_guest_enter_irqoff();
+       guest_state_enter_irqoff();

        if (sev_es_guest(vcpu->kvm)) {
                __svm_sev_es_vcpu_run(vmcb_pa);

@@ -3635,7 +3650,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
                vmload(__sme_page_pa(sd->save_area));
        }

-       kvm_guest_exit_irqoff();
+       guest_state_exit_irqoff();
}

static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)

@@ -4545,7 +4560,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .pmu_ops = &amd_pmu_ops,
        .nested_ops = &svm_nested_ops,

-       .deliver_posted_interrupt = svm_deliver_avic_intr,
+       .deliver_interrupt = svm_deliver_interrupt,
        .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
        .update_pi_irte = svm_update_pi_irte,
        .setup_mce = svm_setup_mce,

@@ -4041,6 +4041,21 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
        return 0;
}

static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
                                  int trig_mode, int vector)
{
        struct kvm_vcpu *vcpu = apic->vcpu;

        if (vmx_deliver_posted_interrupt(vcpu, vector)) {
                kvm_lapic_set_irr(vector, apic);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
        } else {
                trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
                                           trig_mode, vector);
        }
}

/*
 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
 * will not change in the lifetime of the guest.

@@ -6754,7 +6769,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
                                        struct vcpu_vmx *vmx)
{
-       kvm_guest_enter_irqoff();
+       guest_state_enter_irqoff();

        /* L1D Flush includes CPU buffer clear to mitigate MDS */
        if (static_branch_unlikely(&vmx_l1d_should_flush))

@@ -6770,7 +6785,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,

        vcpu->arch.cr2 = native_read_cr2();

-       kvm_guest_exit_irqoff();
+       guest_state_exit_irqoff();
}

static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)

@@ -7768,7 +7783,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .hwapic_isr_update = vmx_hwapic_isr_update,
        .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
        .sync_pir_to_irr = vmx_sync_pir_to_irr,
-       .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+       .deliver_interrupt = vmx_deliver_interrupt,
        .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,

        .set_tss_addr = vmx_set_tss_addr,

@@ -90,6 +90,8 @@
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);

+#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
+
#define emul_to_vcpu(ctxt) \
        ((struct kvm_vcpu *)(ctxt)->vcpu)

@@ -4340,7 +4342,7 @@ static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
        void __user *uaddr = (void __user*)(unsigned long)attr->addr;

        if ((u64)(unsigned long)uaddr != attr->addr)
-               return ERR_PTR(-EFAULT);
+               return ERR_PTR_USR(-EFAULT);
        return uaddr;
}

@@ -10041,6 +10043,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                set_debugreg(0, 7);
        }

+       guest_timing_enter_irqoff();
+
        for (;;) {
                /*
                 * Assert that vCPU vs. VM APICv state is consistent. An APICv

@@ -10125,7 +10129,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         * of accounting via context tracking, but the loss of accuracy is
         * acceptable for all known use cases.
         */
-       vtime_account_guest_exit();
+       guest_timing_exit_irqoff();

        if (lapic_in_kernel(vcpu)) {
                s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;

@@ -11639,8 +11643,6 @@ void kvm_arch_sync_events(struct kvm *kvm)
        kvm_free_pit(kvm);
}

-#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
-
/**
 * __x86_set_memory_region: Setup KVM internal memory slot
 *

@@ -10,51 +10,6 @@

void kvm_spurious_fault(void);

-static __always_inline void kvm_guest_enter_irqoff(void)
-{
-       /*
-        * VMENTER enables interrupts (host state), but the kernel state is
-        * interrupts disabled when this is invoked. Also tell RCU about
-        * it. This is the same logic as for exit_to_user_mode().
-        *
-        * This ensures that e.g. latency analysis on the host observes
-        * guest mode as interrupt enabled.
-        *
-        * guest_enter_irqoff() informs context tracking about the
-        * transition to guest mode and if enabled adjusts RCU state
-        * accordingly.
-        */
-       instrumentation_begin();
-       trace_hardirqs_on_prepare();
-       lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-       instrumentation_end();
-
-       guest_enter_irqoff();
-       lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
-static __always_inline void kvm_guest_exit_irqoff(void)
-{
-       /*
-        * VMEXIT disables interrupts (host state), but tracing and lockdep
-        * have them in state 'on' as recorded before entering guest mode.
-        * Same as enter_from_user_mode().
-        *
-        * context_tracking_guest_exit() restores host context and reinstates
-        * RCU if enabled and required.
-        *
-        * This needs to be done immediately after VM-Exit, before any code
-        * that might contain tracepoints or call out to the greater world,
-        * e.g. before x86_spec_ctrl_restore_host().
-        */
-       lockdep_hardirqs_off(CALLER_ADDR0);
-       context_tracking_guest_exit();
-
-       instrumentation_begin();
-       trace_hardirqs_off_finish();
-       instrumentation_end();
-}
-
#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)        \
({                                                                      \
        bool failed = (consistency_check);                              \

@@ -9,6 +9,7 @@
#include <xen/events.h>
#include <xen/interface/memory.h>

+#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/io_apic.h>

@@ -242,15 +243,9 @@ static __init int xen_parse_no_vector_callback(char *arg)
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);

-bool __init xen_hvm_need_lapic(void)
+static __init bool xen_x2apic_available(void)
{
-       if (xen_pv_domain())
-               return false;
-       if (!xen_hvm_domain())
-               return false;
-       if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
-               return false;
-       return true;
+       return x2apic_supported();
}

static __init void xen_hvm_guest_late_init(void)

@@ -312,7 +307,7 @@ struct hypervisor_x86 x86_hyper_xen_hvm __initdata = {
        .detect                 = xen_platform_hvm,
        .type                   = X86_HYPER_XEN_HVM,
        .init.init_platform     = xen_hvm_guest_init,
-       .init.x2apic_available  = xen_x2apic_para_available,
+       .init.x2apic_available  = xen_x2apic_available,
        .init.init_mem_mapping  = xen_hvm_init_mem_mapping,
        .init.guest_late_init   = xen_hvm_guest_late_init,
        .runtime.pin_vcpu       = xen_pin_vcpu,

@@ -1341,10 +1341,6 @@ asmlinkage __visible void __init xen_start_kernel(void)

        xen_acpi_sleep_register();

-       /* Avoid searching for BIOS MP tables */
-       x86_init.mpparse.find_smp_config = x86_init_noop;
-       x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
        xen_boot_params_init_edd();

#ifdef CONFIG_ACPI

@@ -148,28 +148,12 @@ int xen_smp_intr_init_pv(unsigned int cpu)
        return rc;
}

-static void __init xen_fill_possible_map(void)
-{
-       int i, rc;
-
-       if (xen_initial_domain())
-               return;
-
-       for (i = 0; i < nr_cpu_ids; i++) {
-               rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-               if (rc >= 0) {
-                       num_processors++;
-                       set_cpu_possible(i, true);
-               }
-       }
-}
-
-static void __init xen_filter_cpu_maps(void)
+static void __init _get_smp_config(unsigned int early)
{
        int i, rc;
        unsigned int subtract = 0;

-       if (!xen_initial_domain())
+       if (early)
                return;

        num_processors = 0;

@@ -210,7 +194,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
         * sure the old memory can be recycled. */
        make_lowmem_page_readwrite(xen_initial_gdt);

-       xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();

        /*

@@ -476,5 +459,8 @@ static const struct smp_ops xen_smp_ops __initconst = {
void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
-       xen_fill_possible_map();
+
+       /* Avoid searching for BIOS MP tables */
+       x86_init.mpparse.find_smp_config = x86_init_noop;
+       x86_init.mpparse.get_smp_config = _get_smp_config;
}

@@ -373,7 +373,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
        struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
        unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

-       bip->bip_iter.bi_sector += bytes_done >> 9;
+       bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
        bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}

block/fops.c

@@ -566,34 +566,37 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        loff_t size = bdev_nr_bytes(bdev);
-       size_t count = iov_iter_count(to);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
+       size_t count;

-       if (unlikely(pos + count > size)) {
+       if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
-               if (count > size) {
-                       shorted = count - size;
-                       iov_iter_truncate(to, size);
-               }
+               shorted = iov_iter_count(to) - size;
+               iov_iter_truncate(to, size);
        }

+       count = iov_iter_count(to);
+       if (!count)
+               goto reexpand; /* skip atime */
+
        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = iocb->ki_filp->f_mapping;

                if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
-                                               iocb->ki_pos + count - 1))
-                               return -EAGAIN;
+                       if (filemap_range_needs_writeback(mapping, pos,
+                                                         pos + count - 1)) {
+                               ret = -EAGAIN;
+                               goto reexpand;
+                       }
                } else {
-                       ret = filemap_write_and_wait_range(mapping,
-                                               iocb->ki_pos,
-                                               iocb->ki_pos + count - 1);
+                       ret = filemap_write_and_wait_range(mapping, pos,
+                                                          pos + count - 1);
                        if (ret < 0)
-                               return ret;
+                               goto reexpand;
                }

                file_accessed(iocb->ki_filp);

@@ -603,12 +606,14 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
                        iocb->ki_pos += ret;
                        count -= ret;
                }
+               iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
-                       return ret;
+                       goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

+reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;

@@ -1324,3 +1324,4 @@ module_exit(crypto_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cryptographic algorithms API");
+MODULE_SOFTDEP("pre: cryptomgr");

@@ -643,4 +643,3 @@ EXPORT_SYMBOL_GPL(crypto_req_done);

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: cryptomgr");

@@ -15,12 +15,12 @@
static int crypto_blake2s_update_generic(struct shash_desc *desc,
                                         const u8 *in, unsigned int inlen)
{
-       return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
+       return crypto_blake2s_update(desc, in, inlen, true);
}

static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
{
-       return crypto_blake2s_final(desc, out, blake2s_compress_generic);
+       return crypto_blake2s_final(desc, out, true);
}

#define BLAKE2S_ALG(name, driver_name, digest_size) \

@@ -11,6 +11,7 @@ menuconfig ACPI
        depends on ARCH_SUPPORTS_ACPI
        select PNP
        select NLS
+       select CRC32
        default y if X86
        help
          Advanced Configuration and Power Interface (ACPI) support for

@@ -2007,6 +2007,9 @@ static bool ata_log_supported(struct ata_device *dev, u8 log)
{
        struct ata_port *ap = dev->link->ap;

+       if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+               return false;
+
        if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
                return false;
        return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;

@@ -2445,23 +2448,21 @@ static void ata_dev_config_cpr(struct ata_device *dev)
        struct ata_cpr_log *cpr_log = NULL;
        u8 *desc, *buf = NULL;

-       if (!ata_identify_page_supported(dev,
-                                        ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+       if (ata_id_major_version(dev->id) < 11 ||
+           !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
                goto out;

        /*
-        * Read IDENTIFY DEVICE data log, page 0x47
-        * (concurrent positioning ranges). We can have at most 255 32B range
-        * descriptors plus a 64B header.
+        * Read the concurrent positioning ranges log (0x47). We can have at
+        * most 255 32B range descriptors plus a 64B header.
         */
        buf_len = (64 + 255 * 32 + 511) & ~511;
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                goto out;

-       err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
-                                    ATA_LOG_CONCURRENT_POSITIONING_RANGES,
-                                    buf, buf_len >> 9);
+       err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+                                    0, buf, buf_len >> 9);
        if (err_mask)
                goto out;

@@ -4073,6 +4074,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "WDC WD3000JD-*",             NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
        { "WDC WD3200JD-*",             NULL,   ATA_HORKAGE_WD_BROKEN_LPM },

+       /*
+        * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
+        * log page is accessed. Ensure we never ask for this log page with
+        * these devices.
+        */
+       { "SATADOM-ML 3ME",             NULL,   ATA_HORKAGE_NO_LOG_DIR },
+
        /* End Marker */
        { }
};

@@ -762,7 +762,7 @@ static bool crng_init_try_arch(struct crng_state *crng)
        return arch_init;
}

-static bool __init crng_init_try_arch_early(struct crng_state *crng)
+static bool __init crng_init_try_arch_early(void)
{
        int i;
        bool arch_init = true;

@@ -774,7 +774,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
                        rv = random_get_entropy();
                        arch_init = false;
                }
-               crng->state[i] ^= rv;
+               primary_crng.state[i] ^= rv;
        }

        return arch_init;

@@ -788,22 +788,20 @@ static void crng_initialize_secondary(struct crng_state *crng)
        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

-static void __init crng_initialize_primary(struct crng_state *crng)
+static void __init crng_initialize_primary(void)
{
-       _extract_entropy(&crng->state[4], sizeof(u32) * 12);
-       if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
+       _extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
+       if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
                invalidate_batched_entropy();
                numa_crng_init();
                crng_init = 2;
                pr_notice("crng init done (trusting CPU's manufacturer)\n");
        }
-       crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+       primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

-static void crng_finalize_init(struct crng_state *crng)
+static void crng_finalize_init(void)
{
-       if (crng != &primary_crng || crng_init >= 2)
-               return;
        if (!system_wq) {
                /* We can't call numa_crng_init until we have workqueues,
                 * so mark this for processing later. */

@@ -814,6 +812,7 @@ static void crng_finalize_init(struct crng_state *crng)
        invalidate_batched_entropy();
        numa_crng_init();
        crng_init = 2;
+       crng_need_final_init = false;
        process_random_ready_list();
        wake_up_interruptible(&crng_init_wait);
        kill_fasync(&fasync, SIGIO, POLL_IN);

@@ -980,7 +979,8 @@ static void crng_reseed(struct crng_state *crng, bool use_input_pool)
        memzero_explicit(&buf, sizeof(buf));
        WRITE_ONCE(crng->init_time, jiffies);
        spin_unlock_irqrestore(&crng->lock, flags);
-       crng_finalize_init(crng);
+       if (crng == &primary_crng && crng_init < 2)
+               crng_finalize_init();
}

static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])

@@ -1697,8 +1697,8 @@ int __init rand_initialize(void)
{
        init_std_data();
        if (crng_need_final_init)
-               crng_finalize_init(&primary_crng);
-       crng_initialize_primary(&primary_crng);
+               crng_finalize_init();
+       crng_initialize_primary();
        crng_global_init_time = jiffies;
        if (ratelimit_disable) {
                urandom_warning.interval = 0;

@@ -1856,7 +1856,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
-               input_pool.entropy_count = 0;
+               if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+                       wake_up_interruptible(&random_write_wait);
+                       kill_fasync(&fasync, SIGIO, POLL_OUT);
+               }
                return 0;
        case RNDRESEEDCRNG:
                if (!capable(CAP_SYS_ADMIN))

@@ -2205,13 +2208,15 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
                return;
        }

-       /* Suspend writing if we're above the trickle threshold.
+       /* Throttle writing if we're above the trickle threshold.
         * We'll be woken up again once below random_write_wakeup_thresh,
-        * or when the calling thread is about to terminate.
+        * when the calling thread is about to terminate, or once
+        * CRNG_RESEED_INTERVAL has lapsed.
         */
-       wait_event_interruptible(random_write_wait,
+       wait_event_interruptible_timeout(random_write_wait,
                        !system_wq || kthread_should_stop() ||
-                       POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
+                       POOL_ENTROPY_BITS() <= random_write_wakeup_bits,
+                       CRNG_RESEED_INTERVAL);
        mix_pool_bytes(buffer, count);
        credit_entropy_bits(entropy);
}

@@ -1753,7 +1753,6 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
        char engs_info[2 * OTX2_CPT_NAME_LENGTH];
        struct otx2_cpt_eng_grp_info *grp;
        struct otx2_cpt_engs_rsvd *engs;
-       u32 mask[4];
        int i, j;

        pr_debug("Engine groups global info");

@@ -1785,6 +1784,8 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
                for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
                        engs = &grp->engs[j];
                        if (engs->type) {
+                               u32 mask[5] = { };
+
                                get_engs_info(grp, engs_info,
                                              2 * OTX2_CPT_NAME_LENGTH, j);
                                pr_debug("Slot%d: %s", j, engs_info);

@@ -14,6 +14,7 @@
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>

@@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
        if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
                return -EINVAL;

+       nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
        /* Get the kernel ioctl cmd that matches */
        kcmd = dma_heap_ioctl_cmds[nr];

@@ -350,7 +350,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
        if (irq < 0) {
                edac_printk(KERN_ERR, EDAC_MC,
                            "No irq %d in DT\n", irq);
-               return -ENODEV;
+               return irq;
        }

        /* Arria10 has a 2nd IRQ */
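
The xgene fix just below applies the same idiom: platform_get_irq() already returns a negative errno on failure, and propagating it (rather than replacing it with -ENODEV or -EINVAL) preserves codes such as -EPROBE_DEFER. A minimal sketch; the probe function and handler names are placeholders::

    static int example_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;     /* propagate the encoded errno */

            return devm_request_irq(&pdev->dev, irq, example_isr, 0,
                                    dev_name(&pdev->dev), pdev);
    }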

@@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
                irq = platform_get_irq_optional(pdev, i);
                if (irq < 0) {
                        dev_err(&pdev->dev, "No IRQ resource\n");
-                       rc = -EINVAL;
+                       rc = irq;
                        goto out_err;
                }
                rc = devm_request_irq(&pdev->dev, irq,

@@ -1408,12 +1408,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);

void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,

@@ -1422,6 +1420,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
                                                 enum amdgpu_ss ss_state) { return 0; }
#endif

+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+#else
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+#endif
+
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **mapping);

@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
        }
}

+#if IS_ENABLED(CONFIG_SUSPEND)
+/**
+ * amdgpu_acpi_is_s3_active
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+{
+       return !(adev->flags & AMD_IS_APU) ||
+               (pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
/**
 * amdgpu_acpi_is_s0ix_active
 *

@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
 */
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
-       if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
-               if (adev->flags & AMD_IS_APU)
-                       return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
+       if (!(adev->flags & AMD_IS_APU) ||
+           (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+               return false;
+
+       if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+               dev_warn_once(adev->dev,
+                             "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+                             "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+               return false;
        }
-#endif

+#if !IS_ENABLED(CONFIG_AMD_PMC)
+       dev_warn_once(adev->dev,
+                     "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
+       return false;
+#else
+       return true;
+#endif /* CONFIG_AMD_PMC */
}
+
+#endif /* CONFIG_SUSPEND */

@@ -2246,13 +2246,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
static int amdgpu_pmops_prepare(struct device *dev)
{
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);

        /* Return a positive number here so
         * DPM_FLAG_SMART_SUSPEND works properly
         */
        if (amdgpu_device_supports_boco(drm_dev))
-               return pm_runtime_suspended(dev) &&
-                       pm_suspend_via_firmware();
+               return pm_runtime_suspended(dev);

+       /* if we will not support s3 or s2i for the device
+        * then skip suspend
+        */
+       if (!amdgpu_acpi_is_s0ix_active(adev) &&
+           !amdgpu_acpi_is_s3_active(adev))
+               return 1;
+
+       return 0;
}

@@ -1904,7 +1904,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        unsigned i;
        int r;

-       if (direct_submit && !ring->sched.ready) {
+       if (!direct_submit && !ring->sched.ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }

@@ -1140,6 +1140,9 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3))
+               return;
+
        adev->mmhub.funcs->get_clockgating(adev, flags);

        if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))

@@ -570,32 +570,32 @@ static struct wm_table lpddr5_wm_table = {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 7.95,
-                       .sr_enter_plus_exit_time_us = 9,
+                       .sr_exit_time_us = 13.5,
+                       .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 9.82,
-                       .sr_enter_plus_exit_time_us = 11.196,
+                       .sr_exit_time_us = 13.5,
+                       .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 9.89,
-                       .sr_enter_plus_exit_time_us = 11.24,
+                       .sr_exit_time_us = 13.5,
+                       .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 9.748,
-                       .sr_enter_plus_exit_time_us = 11.102,
+                       .sr_exit_time_us = 13.5,
+                       .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
        }
@ -329,38 +329,38 @@ static struct clk_bw_params dcn31_bw_params = {
|
|||
|
||||
};
|
||||
|
||||
static struct wm_table ddr4_wm_table = {
|
||||
static struct wm_table ddr5_wm_table = {
|
||||
.entries = {
|
||||
{
|
||||
.wm_inst = WM_A,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 6.09,
|
||||
.sr_enter_plus_exit_time_us = 7.14,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_B,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 10.12,
|
||||
.sr_enter_plus_exit_time_us = 11.48,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_C,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 10.12,
|
||||
.sr_enter_plus_exit_time_us = 11.48,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_D,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 10.12,
|
||||
.sr_enter_plus_exit_time_us = 11.48,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.valid = true,
|
||||
},
|
||||
}
|
||||
|
@ -687,7 +687,7 @@ void dcn31_clk_mgr_construct(
|
|||
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
|
||||
dcn31_bw_params.wm_table = lpddr5_wm_table;
|
||||
} else {
|
||||
dcn31_bw_params.wm_table = ddr4_wm_table;
|
||||
dcn31_bw_params.wm_table = ddr5_wm_table;
|
||||
}
|
||||
/* Saved clocks configured at boot for debug purposes */
|
||||
dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
|
||||
|
@@ -5597,6 +5597,26 @@ static bool retrieve_link_cap(struct dc_link *link)
            dp_hw_fw_revision.ieee_fw_rev,
            sizeof(dp_hw_fw_revision.ieee_fw_rev));

    /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
    {
        uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
        uint8_t fwrev_mbp_2018[] = { 7, 4 };
        uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };

        /* We also check for the firmware revision as 16,1 models have an
         * identical device id and are incorrectly quirked otherwise.
         */
        if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
            !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
                    sizeof(str_mbp_2018)) &&
            (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
                     sizeof(fwrev_mbp_2018)) ||
             !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
                     sizeof(fwrev_mbp_2018_vega)))) {
            link->reported_link_cap.link_rate = LINK_RATE_RBR2;
        }
    }

    memset(&link->dpcd_caps.dsc_caps, '\0',
           sizeof(link->dpcd_caps.dsc_caps));
    memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@@ -1608,11 +1608,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
            pipe_ctx->stream_res.stream_enc,
            pipe_ctx->stream_res.tg->inst);

    if (dc_is_embedded_signal(pipe_ctx->stream->signal) &&
        pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
        pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
            pipe_ctx->stream_res.stream_enc);

    if (dc_is_dp_signal(pipe_ctx->stream->signal))
        dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);

@@ -902,19 +902,6 @@ void enc1_stream_encoder_stop_dp_info_packets(

}

void enc1_stream_encoder_reset_fifo(
    struct stream_encoder *enc)
{
    struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

    /* set DIG_START to 0x1 to reset FIFO */
    REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
    udelay(100);

    /* write 0 to take the FIFO out of reset */
    REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
}

void enc1_stream_encoder_dp_blank(
    struct dc_link *link,
    struct stream_encoder *enc)

@@ -1600,8 +1587,6 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
        enc1_stream_encoder_send_immediate_sdp_message,
    .stop_dp_info_packets =
        enc1_stream_encoder_stop_dp_info_packets,
    .reset_fifo =
        enc1_stream_encoder_reset_fifo,
    .dp_blank =
        enc1_stream_encoder_dp_blank,
    .dp_unblank =

@@ -626,9 +626,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
    struct stream_encoder *enc);

void enc1_stream_encoder_reset_fifo(
    struct stream_encoder *enc);

void enc1_stream_encoder_dp_blank(
    struct dc_link *link,
    struct stream_encoder *enc);

@@ -593,8 +593,6 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
        enc1_stream_encoder_send_immediate_sdp_message,
    .stop_dp_info_packets =
        enc1_stream_encoder_stop_dp_info_packets,
    .reset_fifo =
        enc1_stream_encoder_reset_fifo,
    .dp_blank =
        enc1_stream_encoder_dp_blank,
    .dp_unblank =

@@ -789,8 +789,6 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
        enc3_stream_encoder_update_dp_info_packets,
    .stop_dp_info_packets =
        enc1_stream_encoder_stop_dp_info_packets,
    .reset_fifo =
        enc1_stream_encoder_reset_fifo,
    .dp_blank =
        enc1_stream_encoder_dp_blank,
    .dp_unblank =

@@ -164,10 +164,6 @@ struct stream_encoder_funcs {
    void (*stop_dp_info_packets)(
        struct stream_encoder *enc);

    void (*reset_fifo)(
        struct stream_encoder *enc
    );

    void (*dp_blank)(
        struct dc_link *link,
        struct stream_encoder *enc);
@@ -3696,14 +3696,14 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,

static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
    struct smu_table_context *table_context = &smu->smu_table;
    PPTable_t *smc_pptable = table_context->driver_pptable;
    uint16_t *mgpu_fan_boost_limit_rpm;

    GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm);
    /*
     * Skip the MGpuFanBoost setting for those ASICs
     * which do not support it
     */
    if (!smc_pptable->MGpuFanBoostLimitRpm)
    if (*mgpu_fan_boost_limit_rpm == 0)
        return 0;

    return smu_cmn_send_smc_msg_with_param(smu,
@@ -959,6 +959,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
    const struct intel_crtc_state *pipe_config =
        overlay->crtc->config;

    if (rec->dst_height == 0 || rec->dst_width == 0)
        return -EINVAL;

    if (rec->dst_x < pipe_config->pipe_src_w &&
        rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
        rec->dst_y < pipe_config->pipe_src_h &&

@@ -345,10 +345,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
    struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
    enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
    struct intel_uncore *uncore = &i915->uncore;
    u32 val;

    val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
    val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
    if (val == 0xffffffff) {
        drm_dbg_kms(&i915->drm,
                    "Port %s: PHY in TCCOLD, assuming not complete\n",

@@ -2505,9 +2505,14 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
                  timeout) < 0) {
            i915_request_put(rq);

            tl = intel_context_timeline_lock(ce);
            /*
             * Error path, cannot use intel_context_timeline_lock as
             * that is user interruptable and this clean up step
             * must be done.
             */
            mutex_lock(&ce->timeline->mutex);
            intel_context_exit(ce);
            intel_context_timeline_unlock(tl);
            mutex_unlock(&ce->timeline->mutex);

            if (nonblock)
                return -EWOULDBLOCK;
@@ -206,6 +206,11 @@ struct intel_guc {
         * context usage for overflows.
         */
        struct delayed_work work;

        /**
         * @shift: Right shift value for the gpm timestamp
         */
        u32 shift;
    } timestamp;

#ifdef CONFIG_DRM_I915_SELFTEST

@@ -1113,6 +1113,19 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
    if (new_start == lower_32_bits(*prev_start))
        return;

    /*
     * When gt is unparked, we update the gt timestamp and start the ping
     * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
     * is unparked, all switched in contexts will have a start time that is
     * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
     *
     * If neither gt_stamp nor new_start has rolled over, then the
     * gt_stamp_hi does not need to be adjusted, however if one of them has
     * rolled over, we need to adjust gt_stamp_hi accordingly.
     *
     * The below conditions address the cases of new_start rollover and
     * gt_stamp_last rollover respectively.
     */
    if (new_start < gt_stamp_last &&
        (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
        gt_stamp_hi++;
@@ -1124,17 +1137,45 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
    *prev_start = ((u64)gt_stamp_hi << 32) | new_start;
}

static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
/*
 * GuC updates shared memory and KMD reads it. Since this is not synchronized,
 * we run into a race where the value read is inconsistent. Sometimes the
 * inconsistency is in reading the upper MSB bytes of the last_in value when
 * this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper
 * 24 bits are zero. Since these are non-zero values, it is non-trivial to
 * determine validity of these values. Instead we read the values multiple times
 * until they are consistent. In test runs, 3 attempts results in consistent
 * values. The upper bound is set to 6 attempts and may need to be tuned as per
 * any new occurences.
 */
static void __get_engine_usage_record(struct intel_engine_cs *engine,
                                      u32 *last_in, u32 *id, u32 *total)
{
    struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
    int i = 0;

    do {
        *last_in = READ_ONCE(rec->last_switch_in_stamp);
        *id = READ_ONCE(rec->current_context_index);
        *total = READ_ONCE(rec->total_runtime);

        if (READ_ONCE(rec->last_switch_in_stamp) == *last_in &&
            READ_ONCE(rec->current_context_index) == *id &&
            READ_ONCE(rec->total_runtime) == *total)
            break;
    } while (++i < 6);
}

static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
    struct intel_engine_guc_stats *stats = &engine->stats.guc;
    struct intel_guc *guc = &engine->gt->uc.guc;
    u32 last_switch = rec->last_switch_in_stamp;
    u32 ctx_id = rec->current_context_index;
    u32 total = rec->total_runtime;
    u32 last_switch, ctx_id, total;

    lockdep_assert_held(&guc->timestamp.lock);

    __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);

    stats->running = ctx_id != ~0U && last_switch;
    if (stats->running)
        __extend_last_switch(guc, &stats->start_gt_clk, last_switch);
@@ -1149,23 +1190,51 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
    }
}

static void guc_update_pm_timestamp(struct intel_guc *guc,
                                    struct intel_engine_cs *engine,
                                    ktime_t *now)
static u32 gpm_timestamp_shift(struct intel_gt *gt)
{
    u32 gt_stamp_now, gt_stamp_hi;
    intel_wakeref_t wakeref;
    u32 reg, shift;

    with_intel_runtime_pm(gt->uncore->rpm, wakeref)
        reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);

    shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
        GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;

    return 3 - shift;
}

static u64 gpm_timestamp(struct intel_gt *gt)
{
    u32 lo, hi, old_hi, loop = 0;

    hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
    do {
        lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
        old_hi = hi;
        hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
    } while (old_hi != hi && loop++ < 2);

    return ((u64)hi << 32) | lo;
}

static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{
    struct intel_gt *gt = guc_to_gt(guc);
    u32 gt_stamp_lo, gt_stamp_hi;
    u64 gpm_ts;

    lockdep_assert_held(&guc->timestamp.lock);

    gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
    gt_stamp_now = intel_uncore_read(engine->uncore,
                                     RING_TIMESTAMP(engine->mmio_base));
    gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
    gt_stamp_lo = lower_32_bits(gpm_ts);
    *now = ktime_get();

    if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
    if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
        gt_stamp_hi++;

    guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
    guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
}

/*
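Editor's aside: `__extend_last_switch()` and `guc_update_pm_timestamp()` above both fold a rolling 32-bit hardware counter into a monotonically growing 64-bit value. A minimal stand-alone sketch of the same carry idea, in plain C with a hypothetical helper name (illustrative only, not the driver's code):

#include <stdint.h>

/*
 * Sketch: extend a rolling 32-bit hardware counter to 64 bits.
 * 'prev' is the last extended value, 'now32' a fresh 32-bit sample.
 * If the low half moved backwards, the counter wrapped, so carry
 * one into the high half.
 */
static uint64_t extend_counter32(uint64_t prev, uint32_t now32)
{
    uint32_t hi = (uint32_t)(prev >> 32);

    if (now32 < (uint32_t)prev)     /* low 32 bits rolled over */
        hi++;

    return ((uint64_t)hi << 32) | now32;
}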
@@ -1208,8 +1277,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
    if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
        stats_saved = *stats;
        gt_stamp_saved = guc->timestamp.gt_stamp;
        /*
         * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
         * start_gt_clk' calculation below for active engines.
         */
        guc_update_engine_gt_clks(engine);
        guc_update_pm_timestamp(guc, engine, now);
        guc_update_pm_timestamp(guc, now);
        intel_gt_pm_put_async(gt);
        if (i915_reset_count(gpu_error) != reset_count) {
            *stats = stats_saved;

@@ -1241,8 +1314,8 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)

    spin_lock_irqsave(&guc->timestamp.lock, flags);

    guc_update_pm_timestamp(guc, &unused);
    for_each_engine(engine, gt, id) {
        guc_update_pm_timestamp(guc, engine, &unused);
        guc_update_engine_gt_clks(engine);
        engine->stats.guc.prev_total = 0;
    }

@@ -1259,10 +1332,11 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
    ktime_t unused;

    spin_lock_irqsave(&guc->timestamp.lock, flags);
    for (each_engine omitted)
    for_each_engine(engine, gt, id) {
        guc_update_pm_timestamp(guc, engine, &unused);

    guc_update_pm_timestamp(guc, &unused);
    for_each_engine(engine, gt, id)
        guc_update_engine_gt_clks(engine);
    }

    spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}

@@ -1335,10 +1409,15 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
    struct intel_guc *guc = &gt->uc.guc;
    unsigned long flags;
    ktime_t unused;

    if (!guc_submission_initialized(guc))
        return;

    spin_lock_irqsave(&guc->timestamp.lock, flags);
    guc_update_pm_timestamp(guc, &unused);
    spin_unlock_irqrestore(&guc->timestamp.lock, flags);
    mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
                     guc->timestamp.ping_delay);
}

@@ -1783,6 +1862,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
    spin_lock_init(&guc->timestamp.lock);
    INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
    guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
    guc->timestamp.shift = gpm_timestamp_shift(gt);

    return 0;
}
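Editor's aside: the retry loop in `__get_engine_usage_record()` above samples several shared-memory fields until two consecutive reads agree, since the GuC updates them with no lock shared with the KMD. The same technique in isolation, with hypothetical field names (illustrative only):

#include <stdbool.h>
#include <stdint.h>

struct sample { uint32_t a, b, c; };    /* hypothetical shared record */

#define READ_ONCE_U32(x) (*(volatile uint32_t *)&(x))

/* Re-read until two consecutive snapshots agree, with a bounded retry
 * count so a constantly-changing record cannot live-lock the reader.
 */
static bool read_consistent(struct sample *rec, struct sample *out)
{
    int i;

    for (i = 0; i < 6; i++) {
        out->a = READ_ONCE_U32(rec->a);
        out->b = READ_ONCE_U32(rec->b);
        out->c = READ_ONCE_U32(rec->c);

        if (READ_ONCE_U32(rec->a) == out->a &&
            READ_ONCE_U32(rec->b) == out->b &&
            READ_ONCE_U32(rec->c) == out->c)
            return true;
    }
    return false;   /* caller keeps the last, possibly torn, snapshot */
}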
@@ -1522,7 +1522,7 @@ capture_engine(struct intel_engine_cs *engine,
    struct i915_request *rq = NULL;
    unsigned long flags;

    ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
    ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
    if (!ee)
        return NULL;

@@ -2684,7 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define   RING_WAIT           (1 << 11) /* gen3+, PRBx_CTL */
#define   RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */

#define GUCPMTIMESTAMP        _MMIO(0xC3E8)
#define MISC_STATUS0          _MMIO(0xA500)
#define MISC_STATUS1          _MMIO(0xA504)

/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
#define GEN8_RING_CS_GPR(base, n)   _MMIO((base) + 0x600 + (n) * 8)
@@ -158,12 +158,6 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
    case LAYER_1:
        kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE;
        break;
    case LAYER_2:
        kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE;
        break;
    case LAYER_3:
        kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE;
        break;
    }

    kmb->plane_status[plane_id].disable = true;

@@ -361,7 +361,11 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
        bridge_state =
            drm_atomic_get_new_bridge_state(state,
                                            mxsfb->bridge);
        bus_format = bridge_state->input_bus_cfg.format;
        if (!bridge_state)
            bus_format = MEDIA_BUS_FMT_FIXED;
        else
            bus_format = bridge_state->input_bus_cfg.format;

    if (bus_format == MEDIA_BUS_FMT_FIXED) {
        dev_warn_once(drm->dev,
                      "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"

@@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
        *addr += bios->imaged_addr;
    }

    if (unlikely(*addr + size >= bios->size)) {
    if (unlikely(*addr + size > bios->size)) {
        nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
        return false;
    }
@@ -3322,7 +3322,7 @@ static int cm_lap_handler(struct cm_work *work)
    ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
    if (ret) {
        rdma_destroy_ah_attr(&ah_attr);
        return -EINVAL;
        goto deref;
    }

    spin_lock_irq(&cm_id_priv->lock);

@@ -67,8 +67,8 @@ static const char * const cma_events[] = {
    [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};

static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
                         union ib_gid *mgid);
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
                              enum ib_gid_type gid_type);

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{

@@ -1846,17 +1846,19 @@ static void destroy_mc(struct rdma_id_private *id_priv,
        if (dev_addr->bound_dev_if)
            ndev = dev_get_by_index(dev_addr->net,
                                    dev_addr->bound_dev_if);
        if (ndev) {
        if (ndev && !send_only) {
            enum ib_gid_type gid_type;
            union ib_gid mgid;

            cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
                         &mgid);

            if (!send_only)
                cma_igmp_send(ndev, &mgid, false);

            dev_put(ndev);
            gid_type = id_priv->cma_dev->default_gid_type
                [id_priv->id.port_num -
                 rdma_start_port(
                    id_priv->cma_dev->device)];
            cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
                              gid_type);
            cma_igmp_send(ndev, &mgid, false);
        }
        dev_put(ndev);

        cancel_work_sync(&mc->iboe_join.work);
    }

@@ -95,6 +95,7 @@ struct ucma_context {
    u64 uid;

    struct list_head list;
    struct list_head mc_list;
    struct work_struct close_work;
};

@@ -105,6 +106,7 @@ struct ucma_multicast {

    u64 uid;
    u8 join_state;
    struct list_head list;
    struct sockaddr_storage addr;
};

@@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)

    INIT_WORK(&ctx->close_work, ucma_close_id);
    init_completion(&ctx->comp);
    INIT_LIST_HEAD(&ctx->mc_list);
    /* So list_del() will work if we don't do ucma_finish_ctx() */
    INIT_LIST_HEAD(&ctx->list);
    ctx->file = file;

@@ -484,19 +487,19 @@ err1:

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
    struct ucma_multicast *mc;
    unsigned long index;
    struct ucma_multicast *mc, *tmp;

    xa_for_each(&multicast_table, index, mc) {
        if (mc->ctx != ctx)
            continue;
    xa_lock(&multicast_table);
    list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
        list_del(&mc->list);
        /*
         * At this point mc->ctx->ref is 0 so the mc cannot leave the
         * lock on the reader and this is enough serialization
         */
        xa_erase(&multicast_table, index);
        __xa_erase(&multicast_table, mc->id);
        kfree(mc);
    }
    xa_unlock(&multicast_table);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)

@@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file,
    mc->uid = cmd->uid;
    memcpy(&mc->addr, addr, cmd->addr_size);

    if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
    xa_lock(&multicast_table);
    if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
                  GFP_KERNEL)) {
        ret = -ENOMEM;
        goto err_free_mc;
    }

    list_add_tail(&mc->list, &ctx->mc_list);
    xa_unlock(&multicast_table);

    mutex_lock(&ctx->mutex);
    ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
                              join_state, mc);

@@ -1500,8 +1507,11 @@ err_leave_multicast:
    mutex_unlock(&ctx->mutex);
    ucma_cleanup_mc_events(mc);
err_xa_erase:
    xa_erase(&multicast_table, mc->id);
    xa_lock(&multicast_table);
    list_del(&mc->list);
    __xa_erase(&multicast_table, mc->id);
err_free_mc:
    xa_unlock(&multicast_table);
    kfree(mc);
err_put_ctx:
    ucma_put_ctx(ctx);

@@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
        mc = ERR_PTR(-EINVAL);
    else if (!refcount_inc_not_zero(&mc->ctx->ref))
        mc = ERR_PTR(-ENXIO);
    else
        __xa_erase(&multicast_table, mc->id);
    xa_unlock(&multicast_table);

    if (IS_ERR(mc)) {
        xa_unlock(&multicast_table);
        ret = PTR_ERR(mc);
        goto out;
    }

    list_del(&mc->list);
    __xa_erase(&multicast_table, mc->id);
    xa_unlock(&multicast_table);

    mutex_lock(&mc->ctx->mutex);
    rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
    mutex_unlock(&mc->ctx->mutex);
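Editor's aside on the locking pattern in the ucma hunks above: `__xa_alloc()` and `__xa_erase()` are the xarray variants that expect the caller to already hold the xarray's internal spinlock via `xa_lock()`, which lets the list bookkeeping and the table update happen in one critical section. A minimal sketch of that pairing with generic names (not the ucma code):

#include <linux/xarray.h>
#include <linux/list.h>

/* Remove an entry from both a driver list and an xarray in one
 * critical section, so a reader holding the same lock never sees
 * one structure updated without the other.
 */
static void remove_entry_atomically(struct xarray *table,
                                    struct list_head *entry, u32 id)
{
    xa_lock(table);
    list_del(entry);            /* protected by the same lock here */
    __xa_erase(table, id);      /* lock already held: __ variant */
    xa_unlock(table);
}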
@@ -55,7 +55,7 @@ union hfi1_ipoib_flow {
 */
struct ipoib_txreq {
    struct sdma_txreq txreq;
    struct hfi1_sdma_header sdma_hdr;
    struct hfi1_sdma_header *sdma_hdr;
    int sdma_status;
    int complete;
    struct hfi1_ipoib_dev_priv *priv;

@@ -22,26 +22,35 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
    int ret;

    dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
    if (!dev->tstats)
        return -ENOMEM;

    ret = priv->netdev_ops->ndo_init(dev);
    if (ret)
        return ret;
        goto out_ret;

    ret = hfi1_netdev_add_data(priv->dd,
                               qpn_from_mac(priv->netdev->dev_addr),
                               dev);
    if (ret < 0) {
        priv->netdev_ops->ndo_uninit(dev);
        return ret;
        goto out_ret;
    }

    return 0;
out_ret:
    free_percpu(dev->tstats);
    dev->tstats = NULL;
    return ret;
}

static void hfi1_ipoib_dev_uninit(struct net_device *dev)
{
    struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);

    free_percpu(dev->tstats);
    dev->tstats = NULL;

    hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));

    priv->netdev_ops->ndo_uninit(dev);

@@ -166,12 +175,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
    hfi1_ipoib_rxq_deinit(priv->netdev);

    free_percpu(dev->tstats);
}

static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
{
    hfi1_ipoib_netdev_dtor(dev);
    free_netdev(dev);
    dev->tstats = NULL;
}

static void hfi1_ipoib_set_id(struct net_device *dev, int id)

@@ -211,24 +215,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
    priv->port_num = port_num;
    priv->netdev_ops = netdev->netdev_ops;

    netdev->netdev_ops = &hfi1_ipoib_netdev_ops;

    ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);

    rc = hfi1_ipoib_txreq_init(priv);
    if (rc) {
        dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
        hfi1_ipoib_free_rdma_netdev(netdev);
        return rc;
    }

    rc = hfi1_ipoib_rxq_init(netdev);
    if (rc) {
        dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
        hfi1_ipoib_free_rdma_netdev(netdev);
        hfi1_ipoib_txreq_deinit(priv);
        return rc;
    }

    netdev->netdev_ops = &hfi1_ipoib_netdev_ops;

    netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
    netdev->needs_free_netdev = true;

@@ -122,7 +122,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
        dd_dev_warn(priv->dd,
                    "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
                    __func__, tx->sdma_status,
                    le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
                    le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,
                    tx->txq->sde->this_idx);
    }

@@ -231,7 +231,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
{
    struct hfi1_devdata *dd = txp->dd;
    struct sdma_txreq *txreq = &tx->txreq;
    struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
    struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
    u16 pkt_bytes =
        sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
    int ret;

@@ -256,7 +256,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
                                           struct ipoib_txparms *txp)
{
    struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
    struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
    struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
    struct sk_buff *skb = tx->skb;
    struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
    struct rdma_ah_attr *ah_attr = txp->ah_attr;

@@ -483,7 +483,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
    if (likely(!ret)) {
tx_ok:
        trace_sdma_output_ibhdr(txq->priv->dd,
                                &tx->sdma_hdr.hdr,
                                &tx->sdma_hdr->hdr,
                                ib_is_sc5(txp->flow.sc5));
        hfi1_ipoib_check_queue_depth(txq);
        return NETDEV_TX_OK;

@@ -547,7 +547,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
    hfi1_ipoib_check_queue_depth(txq);

    trace_sdma_output_ibhdr(txq->priv->dd,
                            &tx->sdma_hdr.hdr,
                            &tx->sdma_hdr->hdr,
                            ib_is_sc5(txp->flow.sc5));

    if (!netdev_xmit_more())

@@ -683,7 +683,8 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
    struct net_device *dev = priv->netdev;
    u32 tx_ring_size, tx_item_size;
    int i;
    struct hfi1_ipoib_circ_buf *tx_ring;
    int i, j;

    /*
     * Ring holds 1 less than tx_ring_size

@@ -701,7 +702,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)

    for (i = 0; i < dev->num_tx_queues; i++) {
        struct hfi1_ipoib_txq *txq = &priv->txqs[i];
        struct ipoib_txreq *tx;

        tx_ring = &txq->tx_ring;
        iowait_init(&txq->wait,
                    0,
                    hfi1_ipoib_flush_txq,

@@ -725,14 +728,19 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
                           priv->dd->node);

        txq->tx_ring.items =
            kcalloc_node(tx_ring_size, tx_item_size,
                         GFP_KERNEL, priv->dd->node);
            kvzalloc_node(array_size(tx_ring_size, tx_item_size),
                          GFP_KERNEL, priv->dd->node);
        if (!txq->tx_ring.items)
            goto free_txqs;

        txq->tx_ring.max_items = tx_ring_size;
        txq->tx_ring.shift = ilog2(tx_ring_size);
        txq->tx_ring.shift = ilog2(tx_item_size);
        txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
        tx_ring = &txq->tx_ring;
        for (j = 0; j < tx_ring_size; j++)
            hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
                kzalloc_node(sizeof(*tx->sdma_hdr),
                             GFP_KERNEL, priv->dd->node);

        netif_tx_napi_add(dev, &txq->napi,
                          hfi1_ipoib_poll_tx_ring,

@@ -746,7 +754,10 @@ free_txqs:
        struct hfi1_ipoib_txq *txq = &priv->txqs[i];

        netif_napi_del(&txq->napi);
        kfree(txq->tx_ring.items);
        tx_ring = &txq->tx_ring;
        for (j = 0; j < tx_ring_size; j++)
            kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
        kvfree(tx_ring->items);
    }

    kfree(priv->txqs);

@@ -780,17 +791,20 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)

void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
    int i;
    int i, j;

    for (i = 0; i < priv->netdev->num_tx_queues; i++) {
        struct hfi1_ipoib_txq *txq = &priv->txqs[i];
        struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;

        iowait_cancel_work(&txq->wait);
        iowait_sdma_drain(&txq->wait);
        hfi1_ipoib_drain_tx_list(txq);
        netif_napi_del(&txq->napi);
        hfi1_ipoib_drain_tx_ring(txq);
        kfree(txq->tx_ring.items);
        for (j = 0; j < tx_ring->max_items; j++)
            kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
        kvfree(tx_ring->items);
    }

    kfree(priv->txqs);
@@ -3237,7 +3237,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
    case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
        ew = kmalloc(sizeof *ew, GFP_ATOMIC);
        if (!ew)
            break;
            return;

        INIT_WORK(&ew->work, handle_port_mgmt_change_event);
        memcpy(&ew->ib_eqe, eqe, sizeof *eqe);

@@ -3073,6 +3073,8 @@ do_write:
    case IB_WR_ATOMIC_FETCH_AND_ADD:
        if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
            goto inv_err;
        if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
            goto inv_err;
        if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
                                  wqe->atomic_wr.remote_addr,
                                  wqe->atomic_wr.rkey,

@@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
    return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
{
    return &qp->orq[qp->orq_put % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
    struct siw_sqe *orq_e = orq_get_tail(qp);
    struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];

    if (READ_ONCE(orq_e->flags) == 0)
        return orq_e;

@@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)

    spin_lock_irqsave(&qp->orq_lock, flags);

    rreq = orq_get_current(qp);

    /* free current orq entry */
    rreq = orq_get_current(qp);
    WRITE_ONCE(rreq->flags, 0);

    qp->orq_get++;

    if (qp->tx_ctx.orq_fence) {
        if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
            pr_warn("siw: [QP %u]: fence resume: bad status %d\n",

@@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
            rv = -EPROTO;
            goto out;
        }
        /* resume SQ processing */
        /* resume SQ processing, if possible */
        if (tx_waiting->sqe.opcode == SIW_OP_READ ||
            tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
            rreq = orq_get_tail(qp);

            /* SQ processing was stopped because of a full ORQ */
            rreq = orq_get_free(qp);
            if (unlikely(!rreq)) {
                pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
                rv = -EPROTO;

@@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp)
            resume_tx = 1;

        } else if (siw_orq_empty(qp)) {
            /*
             * SQ processing was stopped by fenced work request.
             * Resume since all previous Read's are now completed.
             */
            qp->tx_ctx.orq_fence = 0;
            resume_tx = 1;
        } else {
            pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n",
                    qp_id(qp), qp->orq_get, qp->orq_put);
            rv = -EPROTO;
        }
    }
    qp->orq_get++;
out:
    spin_unlock_irqrestore(&qp->orq_lock, flags);

@@ -313,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,

    if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
        siw_dbg(base_dev, "too many QP's\n");
        return -ENOMEM;
        rv = -ENOMEM;
        goto err_atomic;
    }
    if (attrs->qp_type != IB_QPT_RC) {
        siw_dbg(base_dev, "only RC QP's supported\n");

@@ -615,10 +615,9 @@ static int wm97xx_register_touch(struct wm97xx *wm)
     * extensions)
     */
    wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
    if (!wm->touch_dev) {
        ret = -ENOMEM;
        goto touch_err;
    }
    if (!wm->touch_dev)
        return -ENOMEM;

    platform_set_drvdata(wm->touch_dev, wm);
    wm->touch_dev->dev.parent = wm->dev;
    wm->touch_dev->dev.platform_data = pdata;

@@ -629,9 +628,6 @@
    return 0;
touch_reg_err:
    platform_device_put(wm->touch_dev);
touch_err:
    input_unregister_device(wm->input_dev);
    wm->input_dev = NULL;

    return ret;
}

@@ -639,8 +635,6 @@ touch_err:
static void wm97xx_unregister_touch(struct wm97xx *wm)
{
    platform_device_unregister(wm->touch_dev);
    input_unregister_device(wm->input_dev);
    wm->input_dev = NULL;
}

static int _wm97xx_probe(struct wm97xx *wm)
|
|
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>

@@ -834,6 +835,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
        status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
        if (status & (MMIO_STATUS_GALOG_RUN_MASK))
            break;
        udelay(10);
    }

    if (WARN_ON(i >= LOOP_TIMEOUT))

@@ -569,9 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
                                         fn, &intel_ir_domain_ops,
                                         iommu);
    if (!iommu->ir_domain) {
        irq_domain_free_fwnode(fn);
        pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
        goto out_free_bitmap;
        goto out_free_fwnode;
    }
    iommu->ir_msi_domain =
        arch_create_remap_msi_irq_domain(iommu->ir_domain,

@@ -595,7 +594,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)

    if (dmar_enable_qi(iommu)) {
        pr_err("Failed to enable queued invalidation\n");
        goto out_free_bitmap;
        goto out_free_ir_domain;
    }
}

@@ -619,6 +618,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)

    return 0;

out_free_ir_domain:
    if (iommu->ir_msi_domain)
        irq_domain_remove(iommu->ir_msi_domain);
    iommu->ir_msi_domain = NULL;
    irq_domain_remove(iommu->ir_domain);
    iommu->ir_domain = NULL;
out_free_fwnode:
    irq_domain_free_fwnode(fn);
out_free_bitmap:
    bitmap_free(bitmap);
out_free_pages:

@@ -349,6 +349,7 @@ EXPORT_SYMBOL_GPL(ioasid_alloc);

/**
 * ioasid_get - obtain a reference to the IOASID
 * @ioasid: the ID to get
 */
void ioasid_get(ioasid_t ioasid)
{

@@ -207,9 +207,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)

static void dev_iommu_free(struct device *dev)
{
    iommu_fwspec_free(dev);
    kfree(dev->iommu);
    struct dev_iommu *param = dev->iommu;

    dev->iommu = NULL;
    if (param->fwspec) {
        fwnode_handle_put(param->fwspec->iommu_fwnode);
        kfree(param->fwspec);
    }
    kfree(param);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)

@@ -980,17 +985,6 @@ static int iommu_group_device_count(struct iommu_group *group)
    return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
{

@@ -1005,7 +999,17 @@ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
    return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{

@@ -3032,6 +3036,7 @@ EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to bind callback
 *
 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @device and

@@ -1085,7 +1085,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
}

/**
 * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
 * omap_iommu_prepare - prepare() dev_pm_ops implementation
 * @dev: iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
@@ -5869,10 +5869,6 @@ int md_run(struct mddev *mddev)
        nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
    }

    /* Set the NOWAIT flags if all underlying devices support it */
    if (nowait)
        blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);

    if (!bioset_initialized(&mddev->bio_set)) {
        err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (err)

@@ -6010,6 +6006,10 @@ int md_run(struct mddev *mddev)
        else
            blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
        blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);

        /* Set the NOWAIT flags if all underlying devices support it */
        if (nowait)
            blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
    }
    if (pers->sync_request) {
        if (mddev->kobj.sd &&
@@ -67,7 +67,7 @@ static const unsigned int sd_au_size[] = {
        __res & __mask;                                         \
    })

#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000

struct sd_busy_data {

@@ -1664,6 +1664,12 @@ static int sd_poweroff_notify(struct mmc_card *card)
        goto out;
    }

    /* Find out when the command is completed. */
    err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
                            MMC_BUSY_EXTR_SINGLE);
    if (err)
        goto out;

    cb_data.card = card;
    cb_data.reg_buf = reg_buf;
    err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,

@@ -705,12 +705,12 @@ static int moxart_remove(struct platform_device *pdev)
    if (!IS_ERR_OR_NULL(host->dma_chan_rx))
        dma_release_channel(host->dma_chan_rx);
    mmc_remove_host(mmc);
    mmc_free_host(mmc);

    writel(0, host->base + REG_INTERRUPT_MASK);
    writel(0, host->base + REG_POWER_CONTROL);
    writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
           host->base + REG_CLOCK_CONTROL);
    mmc_free_host(mmc);

    return 0;
}

@@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)

static int esdhc_of_enable_dma(struct sdhci_host *host)
{
    int ret;
    u32 value;
    struct device *dev = mmc_dev(host->mmc);

    if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
        of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
        dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
        of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
        if (ret)
            return ret;
    }

    value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

@@ -405,6 +405,9 @@ static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
    struct dma_slave_config cfg = { 0, };

    res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
    if (!res)
        return -EINVAL;

    cfg.direction = direction;

    if (direction == DMA_DEV_TO_MEM) {
@@ -1021,8 +1021,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
            if (port->aggregator &&
                port->aggregator->is_active &&
                !__port_is_enabled(port)) {

                __enable_port(port);
                *update_slave_arr = true;
            }
        }
        break;

@@ -1779,6 +1779,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
                 port = port->next_port_in_aggregator) {
                __enable_port(port);
            }
            *update_slave_arr = true;
        }
    }
@@ -621,7 +621,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
    get_device(&priv->master_mii_bus->dev);
    priv->master_mii_dn = dn;

    priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
    priv->slave_mii_bus = mdiobus_alloc();
    if (!priv->slave_mii_bus) {
        of_node_put(dn);
        return -ENOMEM;

@@ -681,8 +681,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
    }

    err = mdiobus_register(priv->slave_mii_bus);
    if (err && dn)
    if (err && dn) {
        mdiobus_free(priv->slave_mii_bus);
        of_node_put(dn);
    }

    return err;
}

@@ -690,6 +692,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
    mdiobus_unregister(priv->slave_mii_bus);
    mdiobus_free(priv->slave_mii_bus);
    of_node_put(priv->master_mii_dn);
}
@@ -498,8 +498,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
    struct dsa_switch *ds = priv->ds;
    int err;

    ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
    ds->slave_mii_bus = mdiobus_alloc();
    if (!ds->slave_mii_bus)
        return -ENOMEM;

@@ -512,7 +513,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
    ds->slave_mii_bus->parent = priv->dev;
    ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;

    return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
    err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
    if (err)
        mdiobus_free(ds->slave_mii_bus);

    return err;
}

static int gswip_pce_table_entry_read(struct gswip_priv *priv,

@@ -2145,8 +2150,10 @@ disable_switch:
    gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
    dsa_unregister_switch(priv->ds);
mdio_bus:
    if (mdio_np)
    if (mdio_np) {
        mdiobus_unregister(priv->ds->slave_mii_bus);
        mdiobus_free(priv->ds->slave_mii_bus);
    }
put_mdio_node:
    of_node_put(mdio_np);
    for (i = 0; i < priv->num_gphy_fw; i++)

@@ -2169,6 +2176,7 @@ static int gswip_remove(struct platform_device *pdev)

    if (priv->ds->slave_mii_bus) {
        mdiobus_unregister(priv->ds->slave_mii_bus);
        mdiobus_free(priv->ds->slave_mii_bus);
        of_node_put(priv->ds->slave_mii_bus->dev.of_node);
    }
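Editor's aside: several of the DSA conversions above and below follow one pattern: when an MDIO bus must be torn down at a precise point in the teardown sequence, the devm_ helpers are dropped in favour of explicit alloc/register/unregister/free pairing. A sketch of the manual lifecycle under the assumption of a bare-bones driver (names here are illustrative, not taken from any of the drivers in this diff):

#include <linux/phy.h>
#include <linux/of_mdio.h>

/* alloc -> register -> ... -> unregister -> free, freeing on the
 * error path as well; nothing is left to devres ordering.
 */
static struct mii_bus *example_mdio_create(struct device_node *np)
{
    struct mii_bus *bus = mdiobus_alloc();

    if (!bus)
        return NULL;

    /* fill in bus->name, bus->id, bus->read, bus->write, bus->priv here */

    if (of_mdiobus_register(bus, np)) {
        mdiobus_free(bus);      /* undo the allocation on failure */
        return NULL;
    }
    return bus;
}

static void example_mdio_destroy(struct mii_bus *bus)
{
    mdiobus_unregister(bus);
    mdiobus_free(bus);
}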
@@ -2074,7 +2074,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
    if (priv->irq)
        mt7530_setup_mdio_irq(priv);

    ret = mdiobus_register(bus);
    ret = devm_mdiobus_register(dev, bus);
    if (ret) {
        dev_err(dev, "failed to register MDIO bus: %d\n", ret);
        if (priv->irq)
@@ -3566,7 +3566,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
        return err;
    }

    bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
    bus = mdiobus_alloc_size(sizeof(*mdio_bus));
    if (!bus)
        return -ENOMEM;

@@ -3591,14 +3591,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
    if (!external) {
        err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
        if (err)
            return err;
            goto out;
    }

    err = of_mdiobus_register(bus, np);
    if (err) {
        dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
        mv88e6xxx_g2_irq_mdio_free(chip, bus);
        return err;
        goto out;
    }

    if (external)

@@ -3607,21 +3607,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
    list_add(&mdio_bus->list, &chip->mdios);

    return 0;

out:
    mdiobus_free(bus);
    return err;
}

static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)

{
    struct mv88e6xxx_mdio_bus *mdio_bus;
    struct mv88e6xxx_mdio_bus *mdio_bus, *p;
    struct mii_bus *bus;

    list_for_each_entry(mdio_bus, &chip->mdios, list) {
    list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
        bus = mdio_bus->bus;

        if (!mdio_bus->external)
            mv88e6xxx_g2_irq_mdio_free(chip, bus);

        mdiobus_unregister(bus);
        mdiobus_free(bus);
    }
}
@@ -1061,7 +1061,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
        return PTR_ERR(hw);
    }

    bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
    bus = mdiobus_alloc_size(sizeof(*mdio_priv));
    if (!bus)
        return -ENOMEM;

@@ -1081,6 +1081,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
    rc = mdiobus_register(bus);
    if (rc < 0) {
        dev_err(dev, "failed to register MDIO bus\n");
        mdiobus_free(bus);
        return rc;
    }

@@ -1132,6 +1133,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
        lynx_pcs_destroy(phylink_pcs);
    }
    mdiobus_unregister(felix->imdio);
    mdiobus_free(felix->imdio);
}

static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
@@ -1029,7 +1029,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
    }

    /* Needed in order to initialize the bus mutex lock */
    rc = of_mdiobus_register(bus, NULL);
    rc = devm_of_mdiobus_register(dev, bus, NULL);
    if (rc < 0) {
        dev_err(dev, "failed to register MDIO bus\n");
        return rc;

@@ -1083,7 +1083,8 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
        mdio_device_free(mdio_device);
        lynx_pcs_destroy(phylink_pcs);
    }
    mdiobus_unregister(felix->imdio);

    /* mdiobus_unregister and mdiobus_free handled by devres */
}

static const struct felix_info seville_info_vsc9953 = {