Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit 1d68101367
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
@@ -108,12 +108,13 @@ some, but not all of the other indices changing.
 
 Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
 will not need to allocate memory.  The :c:func:`xa_reserve` function
-will store a reserved entry at the indicated index.  Users of the normal
-API will see this entry as containing ``NULL``.  If you do not need to
-use the reserved entry, you can call :c:func:`xa_release` to remove the
-unused entry.  If another user has stored to the entry in the meantime,
-:c:func:`xa_release` will do nothing; if instead you want the entry to
-become ``NULL``, you should use :c:func:`xa_erase`.
+will store a reserved entry at the indicated index.  Users of the
+normal API will see this entry as containing ``NULL``.  If you do
+not need to use the reserved entry, you can call :c:func:`xa_release`
+to remove the unused entry.  If another user has stored to the entry
+in the meantime, :c:func:`xa_release` will do nothing; if instead you
+want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
+Using :c:func:`xa_insert` on a reserved entry will fail.
 
 If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
 will return ``true``.
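[Reader aid, not part of the diff] The paragraph reworded above describes a reserve/publish/release protocol. A minimal sketch of it, assuming a hypothetical `cache` array and callers; only the xa_* calls are the documented API:

    #include <linux/xarray.h>

    static DEFINE_XARRAY(cache);

    /* May allocate and sleep; a normal lookup at @index then sees NULL. */
    static int prepare_slot(unsigned long index)
    {
            return xa_reserve(&cache, index, GFP_KERNEL);
    }

    /* Publish into the reserved slot, or hand the slot back. */
    static void publish_or_abort(unsigned long index, void *obj)
    {
            if (obj)
                    xa_store(&cache, index, obj, GFP_ATOMIC); /* slot already reserved */
            else
                    xa_release(&cache, index); /* no-op if another user stored meanwhile */
    }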
@@ -183,6 +184,8 @@ Takes xa_lock internally:
 * :c:func:`xa_store_bh`
 * :c:func:`xa_store_irq`
 * :c:func:`xa_insert`
+* :c:func:`xa_insert_bh`
+* :c:func:`xa_insert_irq`
 * :c:func:`xa_erase`
 * :c:func:`xa_erase_bh`
 * :c:func:`xa_erase_irq`
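[Reader aid] The two additions matter when stores happen in softirq or hardirq context. A sketch under that assumption (`session_map` and the caller are invented; `xa_insert_bh()` is the documented call, which takes xa_lock with bottom halves disabled and fails with a negative errno if the index is occupied, reserved entries included):

    static DEFINE_XARRAY(session_map);

    /* Runs in softirq context, hence the _bh lock flavour. */
    static int publish_session(unsigned long id, void *session)
    {
            return xa_insert_bh(&session_map, id, session, GFP_ATOMIC);
    }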
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -27,7 +27,6 @@ Example:
 		reg = <0x04300000 0x20000>;
 		reg-names = "kgsl_3d0_reg_memory";
 		interrupts = <GIC_SPI 80 0>;
-		interrupt-names = "kgsl_3d0_irq";
 		clock-names =
 		    "core",
 		    "iface",
 MAINTAINERS | 45 ++++++++++++++++++++++++---------------------

diff --git a/MAINTAINERS b/MAINTAINERS
@@ -3052,8 +3052,8 @@ F:	include/linux/bcm963xx_nvram.h
 F:	include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Rasesh Mody <rmody@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2.*
@@ -3072,9 +3072,9 @@ S:	Supported
 F:	drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:	Ariel Elior <ariel.elior@cavium.com>
-M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:	everest-linux-l2@cavium.com
+M:	Ariel Elior <aelior@marvell.com>
+M:	Sudarsana Kalluru <skalluru@marvell.com>
+M:	GR-everest-linux-l2@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
@@ -3249,9 +3249,9 @@ S:	Supported
 F:	drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Rasesh Mody <rmody@marvell.com>
+M:	Sudarsana Kalluru <skalluru@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/brocade/bna/
@@ -3978,6 +3978,7 @@ F:	drivers/cpufreq/arm_big_little.c
 CPU POWER MONITORING SUBSYSTEM
 M:	Thomas Renninger <trenn@suse.com>
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 F:	tools/power/cpupower/
@@ -8265,6 +8266,7 @@ F:	include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-kselftest@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
 Q:	https://patchwork.kernel.org/project/linux-kselftest/list/
@@ -10696,9 +10698,9 @@ S:	Maintained
 F:	drivers/net/netdevsim/*
 
 NETXEN (1/10) GbE SUPPORT
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Rahul Verma <rahul.verma@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Manish Chopra <manishc@marvell.com>
+M:	Rahul Verma <rahulv@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/netxen/
@@ -12482,8 +12484,8 @@ S:	Supported
 F:	drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M:	Ariel Elior <Ariel.Elior@cavium.com>
-M:	everest-linux-l2@cavium.com
+M:	Ariel Elior <aelior@marvell.com>
+M:	GR-everest-linux-l2@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qed/
@@ -12491,8 +12493,8 @@ F:	include/linux/qed/
 F:	drivers/net/ethernet/qlogic/qede/
 
 QLOGIC QL4xxx RDMA DRIVER
-M:	Michal Kalderon <Michal.Kalderon@cavium.com>
-M:	Ariel Elior <Ariel.Elior@cavium.com>
+M:	Michal Kalderon <mkalderon@marvell.com>
+M:	Ariel Elior <aelior@marvell.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/qedr/
@@ -12512,7 +12514,7 @@ F:	Documentation/scsi/LICENSE.qla2xxx
 F:	drivers/scsi/qla2xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
-M:	Dept-GELinuxNICDev@cavium.com
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
@@ -12526,16 +12528,16 @@ F:	Documentation/scsi/LICENSE.qla4xxx
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Shahed Shaikh <Shahed.Shaikh@cavium.com>
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Shahed Shaikh <shshaikh@marvell.com>
+M:	Manish Chopra <manishc@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Manish Chopra <manishc@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlge/
@@ -15857,6 +15859,7 @@ F:	drivers/usb/common/usb-otg-fsm.c
 USB OVER IP DRIVER
 M:	Valentina Manea <valentina.manea.m@gmail.com>
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	Documentation/usb/usbip_protocol.txt
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
@@ -3,23 +3,19 @@ generic-y += bugs.h
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
-generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += extable.h
-generic-y += fb.h
 generic-y += ftrace.h
 generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
-generic-y += kmap_types.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
-generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += topology.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
@@ -216,6 +216,14 @@ struct bcr_fp_arcv2 {
 #endif
 };
 
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:21, min:1, num:2, ver:8;
+#else
+	unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
 #include <soc/arc/timers.h>
 
 struct bcr_bpu_arcompact {
@@ -283,7 +291,7 @@ struct cpuinfo_arc_cache {
 };
 
 struct cpuinfo_arc_bpu {
-	unsigned int ver, full, num_cache, num_pred;
+	unsigned int ver, full, num_cache, num_pred, ret_stk;
 };
 
 struct cpuinfo_arc_ccm {
@@ -302,7 +310,7 @@ struct cpuinfo_arc {
 	struct {
 		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
 			     fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
-			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
+			     ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
 			     timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
 	} extn;
 	struct bcr_mpy extn_mpy;
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
 {
 	if (!word)
 		return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
 {
-	int n;
+	unsigned long n;
 
 	asm volatile(
 	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
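[Reader aid] The signature change matches the asm-generic definition, where __ffs() takes and returns unsigned long, so results can be fed straight into BIT() and mask arithmetic without sign-conversion warnings. The typical caller pattern is sketched below (it mirrors the loop in arc_pmu_intr() later in this diff; `read_pending_mask()` and `handle_counter()` are invented):

    unsigned long pending = read_pending_mask();   /* hypothetical */

    while (pending) {
            unsigned long idx = __ffs(pending);    /* lowest set bit, 0..31 */

            handle_counter(idx);                   /* hypothetical */
            pending &= ~BIT(idx);
    }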
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
 
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK] = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
@@ -1,15 +1,10 @@
-/*
- * Linux performance counter support for ARC700 series
- *
- * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This code is inspired by the perf support of various other architectures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Linux performance counter support for ARC CPUs.
+// This code is inspired by the perf support of various other architectures.
+//
+// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
+
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -19,12 +14,31 @@
 #include <asm/arcregs.h>
 #include <asm/stacktrace.h>
 
+/* HW holds 8 symbols + one for null terminator */
+#define ARCPMU_EVENT_NAME_LEN	9
+
+enum arc_pmu_attr_groups {
+	ARCPMU_ATTR_GR_EVENTS,
+	ARCPMU_ATTR_GR_FORMATS,
+	ARCPMU_NR_ATTR_GR
+};
+
+struct arc_pmu_raw_event_entry {
+	char name[ARCPMU_EVENT_NAME_LEN];
+};
+
 struct arc_pmu {
 	struct pmu	pmu;
 	unsigned int	irq;
 	int		n_counters;
+	int		n_events;
 	u64		max_period;
 	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
+
+	struct arc_pmu_raw_event_entry	*raw_entry;
+	struct attribute		**attrs;
+	struct perf_pmu_events_attr	*attr;
+	const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
 };
 
 struct arc_pmu_cpu {
@@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
 	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
+
 	perf_callchain_store(entry, addr);
 
 	if (ctrl->depth++ < 3)
@@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
 	return -1;
 }
 
-void
-perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+			   struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,
@@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
 	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
 }
 
-void
-perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder
@@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
 static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
 
 /* read counter #idx; note that counter# != event# on ARC! */
-static uint64_t arc_pmu_read_counter(int idx)
+static u64 arc_pmu_read_counter(int idx)
 {
-	uint32_t tmp;
-	uint64_t result;
+	u32 tmp;
+	u64 result;
 
 	/*
 	 * ARC supports making 'snapshots' of the counters, so we don't
@@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
-	result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
+	result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
 	result |= read_aux_reg(ARC_REG_PCT_SNAPL);
 
 	return result;
@@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
 static void arc_perf_event_update(struct perf_event *event,
 				  struct hw_perf_event *hwc, int idx)
 {
-	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
-	uint64_t new_raw_count = arc_pmu_read_counter(idx);
-	int64_t delta = new_raw_count - prev_raw_count;
+	u64 prev_raw_count = local64_read(&hwc->prev_count);
+	u64 new_raw_count = arc_pmu_read_counter(idx);
+	s64 delta = new_raw_count - prev_raw_count;
 
 	/*
 	 * We aren't afraid of hwc->prev_count changing beneath our feet
@@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event)
 	int ret;
 
 	if (!is_sampling_event(event)) {
-		hwc->sample_period  = arc_pmu->max_period;
+		hwc->sample_period = arc_pmu->max_period;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
 		pr_debug("init cache event with h/w %08x \'%s\'\n",
 			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
 		return 0;
+
+	case PERF_TYPE_RAW:
+		if (event->attr.config >= arc_pmu->n_events)
+			return -ENOENT;
+
+		hwc->config |= event->attr.config;
+		pr_debug("init raw event with idx %lld \'%s\'\n",
+			 event->attr.config,
+			 arc_pmu->raw_entry[event->attr.config].name);
+
+		return 0;
+
 	default:
 		return -ENOENT;
 	}
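[Reader aid] What the new PERF_TYPE_RAW case enables, seen from userspace: a raw hardware condition can be requested by index through perf_event_open(2). A hedged sketch (the helper is invented; the index must be below the n_events value the driver reads from the CC BUILD register):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_raw_counter(unsigned long long idx)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;      /* routed to the new case above */
            attr.config = idx;              /* raw h/w condition index */

            /* pid 0 = calling task, cpu -1 = any CPU */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }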
@@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
 /* starts all counters */
 static void arc_pmu_enable(struct pmu *pmu)
 {
-	uint32_t tmp;
+	u32 tmp;
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
 }
@@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
 /* stops all counters */
 static void arc_pmu_disable(struct pmu *pmu)
 {
-	uint32_t tmp;
+	u32 tmp;
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
 }
@@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event)
 		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		overflow = 1;
-	} else	if (unlikely(left <= 0)) {
+	} else if (unlikely(left <= 0)) {
 		/* left underflowed by less than period. */
 		left += period;
 		local64_set(&hwc->period_left, left);
@@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
 
 	/* Write value */
-	write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
-	write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
+	write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
+	write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
 
 	perf_event_update_userpage(event);
 
@@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
 	/* Enable interrupt for this counter */
 	if (is_sampling_event(event))
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
 	/* enable ARC pmu here */
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
@@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
 		 * Reset interrupt flag by writing of 1. This is required
 		 * to make sure pending interrupt was not left.
 		 */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
 	}
 
 	if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
 
 	if (is_sampling_event(event)) {
 		/* Mimic full counter overflow as other arches do */
-		write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
+		write_aux_reg(ARC_REG_PCT_INT_CNTL,
+			      lower_32_bits(arc_pmu->max_period));
 		write_aux_reg(ARC_REG_PCT_INT_CNTH,
-			      (arc_pmu->max_period >> 32));
+			      upper_32_bits(arc_pmu->max_period));
 	}
 
 	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 		idx = __ffs(active_ints);
 
 		/* Reset interrupt flag by writing of 1 */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 
 		/*
 		 * On reset of "interrupt active" bit corresponding
@@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 		 * Now we need to re-enable interrupt for the counter.
 		 */
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
 		event = pmu_cpu->act_counter[idx];
 		hwc = &event->hw;
@@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 			arc_pmu_stop(event, 0);
 		}
 
-		active_ints &= ~(1U << idx);
+		active_ints &= ~BIT(idx);
 	} while (active_ints);
 
 done:
@@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
 }
 
+/* Event field occupies the bottom 15 bits of our config field */
+PMU_FORMAT_ATTR(event, "config:0-14");
+static struct attribute *arc_pmu_format_attrs[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group arc_pmu_format_attr_gr = {
+	.name = "format",
+	.attrs = arc_pmu_format_attrs,
+};
+
+static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+/*
+ * We don't add attrs here as we don't have pre-defined list of perf events.
+ * We will generate and add attrs dynamically in probe() after we read HW
+ * configuration.
+ */
+static struct attribute_group arc_pmu_events_attr_gr = {
+	.name = "events",
+};
+
+static void arc_pmu_add_raw_event_attr(int j, char *str)
+{
+	memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
+	arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
+	arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+	arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
+	arc_pmu->attr[j].id = j;
+	arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
+}
+
+static int arc_pmu_raw_alloc(struct device *dev)
+{
+	arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+		sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->attr)
+		return -ENOMEM;
+
+	arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+		sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->attrs)
+		return -ENOMEM;
+
+	arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
+		sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->raw_entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline bool event_in_hw_event_map(int i, char *name)
+{
+	if (!arc_pmu_ev_hw_map[i])
+		return false;
+
+	if (!strlen(arc_pmu_ev_hw_map[i]))
+		return false;
+
+	if (strcmp(arc_pmu_ev_hw_map[i], name))
+		return false;
+
+	return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+	int i;
+
+	/* See if HW condition has been mapped to a perf event_id */
+	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+		if (event_in_hw_event_map(i, str)) {
+			pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+				 i, str, j);
+			arc_pmu->ev_hw_idx[i] = j;
+		}
+	}
+}
+
 static int arc_pmu_device_probe(struct platform_device *pdev)
 {
 	struct arc_reg_pct_build pct_bcr;
 	struct arc_reg_cc_build cc_bcr;
-	int i, j, has_interrupts;
+	int i, has_interrupts;
 	int counter_size;	/* in bits */
 
 	union cc_name {
 		struct {
-			uint32_t word0, word1;
+			u32 word0, word1;
 			char sentinel;
 		} indiv;
-		char str[9];
+		char str[ARCPMU_EVENT_NAME_LEN];
 	} cc_name;
 
 
@@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
-	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
+	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+		return -EINVAL;
 
 	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
-	BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
+	if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+		return -EINVAL;
 
 	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
 	if (!arc_pmu)
 		return -ENOMEM;
 
+	arc_pmu->n_events = cc_bcr.c;
+
+	if (arc_pmu_raw_alloc(&pdev->dev))
+		return -ENOMEM;
+
 	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
 
 	arc_pmu->n_counters = pct_bcr.c;
@@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
 		arc_pmu->n_counters, counter_size, cc_bcr.c,
-		has_interrupts ? ", [overflow IRQ support]":"");
+		has_interrupts ? ", [overflow IRQ support]" : "");
 
-	cc_name.str[8] = 0;
+	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
 	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
 		arc_pmu->ev_hw_idx[i] = -1;
 
 	/* loop thru all available h/w condition indexes */
-	for (j = 0; j < cc_bcr.c; j++) {
-		write_aux_reg(ARC_REG_CC_INDEX, j);
+	for (i = 0; i < cc_bcr.c; i++) {
+		write_aux_reg(ARC_REG_CC_INDEX, i);
 		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
 		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
 
-		/* See if it has been mapped to a perf event_id */
-		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
-			if (arc_pmu_ev_hw_map[i] &&
-			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
-			    strlen(arc_pmu_ev_hw_map[i])) {
-				pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
-					 i, cc_name.str, j);
-				arc_pmu->ev_hw_idx[i] = j;
-			}
-		}
+		arc_pmu_map_hw_event(i, cc_name.str);
+		arc_pmu_add_raw_event_attr(i, cc_name.str);
 	}
 
+	arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
+	arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
+	arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
+
 	arc_pmu->pmu = (struct pmu) {
 		.pmu_enable	= arc_pmu_enable,
 		.pmu_disable	= arc_pmu_disable,
@@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		.start		= arc_pmu_start,
 		.stop		= arc_pmu_stop,
 		.read		= arc_pmu_read,
+		.attr_groups	= arc_pmu->attr_groups,
 	};
 
 	if (has_interrupts) {
@@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
-	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
+	/*
+	 * perf parser doesn't really like '-' symbol in events name, so let's
+	 * use '_' in arc pct name as it goes to kernel PMU event prefix.
+	 */
+	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id arc_pmu_match[] = {
 	{ .compatible = "snps,arc700-pct" },
 	{ .compatible = "snps,archs-pct" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, arc_pmu_match);
-#endif
 
 static struct platform_driver arc_pmu_driver = {
 	.driver = {
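[Reader aid] Once .attr_groups is wired up as above, perf core exposes the generated names under /sys/bus/event_source/devices/arc_pct/events/ and the format spec under .../format/event; that is standard PMU sysfs behaviour rather than extra logic in this patch, and it is what lets the perf tool resolve events by name with the arc_pct/<event>/ syntax.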
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
@@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void)
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	const struct id_to_str *tbl;
 	struct bcr_isa_arcv2 isa;
+	struct bcr_actionpoint ap;
 
 	FIX_PTR(cpu);
 
@@ -195,6 +196,7 @@ static void read_arc_build_cfg_regs(void)
 		cpu->bpu.full = bpu.ft;
 		cpu->bpu.num_cache = 256 << bpu.bce;
 		cpu->bpu.num_pred = 2048 << bpu.pte;
+		cpu->bpu.ret_stk = 4 << bpu.rse;
 
 		if (cpu->core.family >= 0x54) {
 			unsigned int exec_ctrl;
@@ -207,8 +209,11 @@ static void read_arc_build_cfg_regs(void)
 		}
 	}
 
-	READ_BCR(ARC_REG_AP_BCR, bcr);
-	cpu->extn.ap = bcr.ver ? 1 : 0;
+	READ_BCR(ARC_REG_AP_BCR, ap);
+	if (ap.ver) {
+		cpu->extn.ap_num = 2 << ap.num;
+		cpu->extn.ap_full = !!ap.min;
+	}
 
 	READ_BCR(ARC_REG_SMART_BCR, bcr);
 	cpu->extn.smart = bcr.ver ? 1 : 0;
@@ -216,8 +221,6 @@ static void read_arc_build_cfg_regs(void)
 	READ_BCR(ARC_REG_RTT_BCR, bcr);
 	cpu->extn.rtt = bcr.ver ? 1 : 0;
 
-	cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-
 	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
 
 	/* some hacks for lack of feature BCR info in old ARC700 cores */
@@ -299,10 +302,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 
 	if (cpu->bpu.ver)
 		n += scnprintf(buf + n, len - n,
-			      "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
+			      "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
 			      IS_AVAIL1(cpu->bpu.full, "full"),
 			      IS_AVAIL1(!cpu->bpu.full, "partial"),
-			      cpu->bpu.num_cache, cpu->bpu.num_pred);
+			      cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
 
 	if (is_isa_arcv2()) {
 		struct bcr_lpb lpb;
@@ -336,11 +339,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 			       IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
 			       IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
 
-	if (cpu->extn.debug)
-		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
-			       IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
+	if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
+		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
 			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
 			       IS_AVAIL1(cpu->extn.rtt, "RTT "));
+		if (cpu->extn.ap_num) {
+			n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
+				       cpu->extn.ap_num,
+				       cpu->extn.ap_full ? "full":"min");
+		}
+		n += scnprintf(buf + n, len - n, "\n");
+	}
 
 	if (cpu->dccm.sz || cpu->iccm.sz)
 		n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
 #include <asm/arcregs.h>
 #include <asm/irqflags.h>
 
+#define ARC_PATH_MAX	256
+
 /*
  * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
  *   -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
 	print_reg_file(&(cregs->r13), 13);
 }
 
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
 {
 	char *path_nm = NULL;
 	struct mm_struct *mm;
 	struct file *exe_file;
+	char buf[ARC_PATH_MAX];
 
 	mm = get_task_mm(tsk);
 	if (!mm)
@@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
 	mmput(mm);
 
 	if (exe_file) {
-		path_nm = file_path(exe_file, buf, 255);
+		path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
 		fput(exe_file);
 	}
 
@@ -80,10 +83,9 @@ done:
 	pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
 }
 
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
 {
 	struct vm_area_struct *vma;
-	char *nm = buf;
 	struct mm_struct *active_mm = current->active_mm;
 
 	/* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
 	 * if the container VMA is not found
 	 */
 	if (vma && (vma->vm_start <= address)) {
+		char buf[ARC_PATH_MAX];
+		char *nm = "?";
+
 		if (vma->vm_file) {
-			nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+			nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
 			if (IS_ERR(nm))
 				nm = "?";
 		}
@@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 	struct callee_regs *cregs;
-	char *buf;
 
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return;
+	/*
+	 * generic code calls us with preemption disabled, but some calls
+	 * here could sleep, so re-enable to avoid lockdep splat
+	 */
+	preempt_enable();
 
-	print_task_path_n_nm(tsk, buf);
+	print_task_path_n_nm(tsk);
 	show_regs_print_info(KERN_INFO);
 
 	show_ecr_verbose(regs);
@@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs)
 		(void *)regs->blink, (void *)regs->ret);
 
 	if (user_mode(regs))
-		show_faulting_vma(regs->ret, buf);	/* faulting code, not data */
+		show_faulting_vma(regs->ret);	/* faulting code, not data */
 
 	pr_info("[STAT32]: 0x%08lx", regs->status32);
 
@@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs)
 	if (cregs)
 		show_callee_regs(cregs);
 
-	free_page((unsigned long)buf);
+	preempt_disable();
 }
 
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/cache.h>
 
-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use prefetchw and prealloc
+ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
+ * If you want to implement optimized memset for other possible L1 data cache
+ * line lengths (32B and 128B) you should rewrite code carefully checking
+ * we don't call any prefetchw/prealloc instruction for L1 cache lines which
+ * don't belongs to memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR	reg, off
+	prealloc	[\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR	reg, off
+	prefetchw	[\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif
 
 ENTRY_CFI(memset)
-	prefetchw [r0]		; Prefetch the write location
+	PREFETCHW_INSTR	r0, 0	; Prefetch the first write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
 
 	lpnz	@.Lset64bytes
 	;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
-	prefetchw [r3, 64]	;Prefetch the next write location
-#else
-	prealloc  [r3, 64]
-#endif
+	PREALLOC_INSTR	r3, 64	; alloc next line w/o fetching
+
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
 	lsr.f	lp_count, r2, 5 ;Last remaining	max 124 bytes
 	lpnz	.Lset32bytes
 	;; LOOP START
-	prefetchw   [r3, 32]	;Prefetch the next write location
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
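[Reader aid] Why the new macros must compile away unless L1_CACHE_SHIFT == 6: prealloc allocates a whole dirty L1 line without fetching its contents from memory, so issuing it for a line that is not entirely covered by the memset region would clobber the bytes of that line the caller never asked to write; prefetchw is only a hint and is harmless either way. This is the hazard the new comment block describes.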
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
@@ -141,12 +141,17 @@ good_area:
 	 */
 	fault = handle_mm_fault(vma, address, flags);
 
-	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (fatal_signal_pending(current)) {
-		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
-			up_read(&mm->mmap_sem);
-		if (user_mode(regs))
+
+		/*
+		 * if fault retry, mmap_sem already relinquished by core mm
+		 * so OK to return to user mode (with signal handled first)
+		 */
+		if (fault & VM_FAULT_RETRY) {
+			if (!user_mode(regs))
+				goto no_context;
 			return;
+		}
 	}
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
@@ -119,7 +119,8 @@ void __init setup_arch_memory(void)
 	 */
 
 	memblock_add_node(low_mem_start, low_mem_sz, 0);
-	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+	memblock_reserve(CONFIG_LINUX_LINK_BASE,
+			 __pa(_end) - CONFIG_LINUX_LINK_BASE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (phys_initrd_size) {
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
@@ -1 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
 #include <xen/arm/page-coherent.h>
+
+static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
+{
+	if (dev && dev->archdata.dev_dma_ops)
+		return dev->archdata.dev_dma_ops;
+	return get_arch_dma_ops(NULL);
+}
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, unsigned long attrs)
+{
+	unsigned long page_pfn = page_to_xen_pfn(page);
+	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+	unsigned long compound_pages =
+		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+	bool local = (page_pfn <= dev_pfn) &&
+		(dev_pfn - page_pfn < compound_pages);
+
+	/*
+	 * Dom0 is mapped 1:1, while the Linux page can span across
+	 * multiple Xen pages, it's not possible for it to contain a
+	 * mix of local and foreign Xen pages. So if the first xen_pfn
+	 * == mfn the page is local otherwise it's a foreign page
+	 * grant-mapped in dom0. If the page is local we can safely
+	 * call the native dma_ops function, otherwise we call the xen
+	 * specific function.
+	 */
+	if (local)
+		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+	else
+		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	/*
+	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
+	 * multiple Xen page, it's not possible to have a mix of local and
+	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
+	 * foreign mfn will always return false. If the page is local we can
+	 * safely call the native dma_ops function, otherwise we call the xen
+	 * specific function.
+	 */
+	if (pfn_valid(pfn)) {
+		if (xen_get_dma_ops(hwdev)->unmap_page)
+			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+	} else
+		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	if (pfn_valid(pfn)) {
+		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
+			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+	} else
+		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	if (pfn_valid(pfn)) {
+		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
+			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+	} else
+		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
@@ -20,9 +20,6 @@ struct dev_archdata {
 #ifdef CONFIG_IOMMU_API
 	void *iommu;			/* private IOMMU data */
 #endif
-#ifdef CONFIG_XEN
-	const struct dma_map_ops *dev_dma_ops;
-#endif
 };
 
 struct pdev_archdata {
@ -1 +1,77 @@
|
||||||
|
/* SPDX-License-Identifier: GPL-2.0 */
|
||||||
|
#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
|
||||||
|
#define _ASM_ARM64_XEN_PAGE_COHERENT_H
|
||||||
|
|
||||||
|
#include <linux/dma-mapping.h>
|
||||||
|
#include <asm/page.h>
|
||||||
#include <xen/arm/page-coherent.h>
|
#include <xen/arm/page-coherent.h>
|
||||||
|
|
||||||
|
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
|
||||||
|
dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
|
||||||
|
{
|
||||||
|
return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
|
||||||
|
void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
|
||||||
|
{
|
||||||
|
dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
|
||||||
|
dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
||||||
|
{
|
||||||
|
unsigned long pfn = PFN_DOWN(handle);
|
||||||
|
|
||||||
|
if (pfn_valid(pfn))
|
||||||
|
dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
|
||||||
|
else
|
||||||
|
__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
|
||||||
|
dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
||||||
|
{
|
||||||
|
unsigned long pfn = PFN_DOWN(handle);
|
||||||
|
if (pfn_valid(pfn))
|
||||||
|
dma_direct_sync_single_for_device(hwdev, handle, size, dir);
|
||||||
|
else
|
||||||
|
__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
|
||||||
|
dma_addr_t dev_addr, unsigned long offset, size_t size,
|
||||||
|
enum dma_data_direction dir, unsigned long attrs)
|
||||||
|
{
|
||||||
|
unsigned long page_pfn = page_to_xen_pfn(page);
|
||||||
|
unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
|
||||||
|
unsigned long compound_pages =
|
||||||
|
(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
|
||||||
|
bool local = (page_pfn <= dev_pfn) &&
|
||||||
|
(dev_pfn - page_pfn < compound_pages);
|
||||||
|
|
||||||
|
if (local)
|
||||||
|
dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
|
||||||
|
else
|
||||||
|
__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
|
||||||
|
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||||
|
{
|
||||||
|
unsigned long pfn = PFN_DOWN(handle);
|
||||||
|
/*
|
||||||
|
* Dom0 is mapped 1:1, while the Linux page can be spanned accross
|
||||||
|
* multiple Xen page, it's not possible to have a mix of local and
|
||||||
|
* foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
|
||||||
|
* foreign mfn will always return false. If the page is local we can
|
||||||
|
* safely call the native dma_ops function, otherwise we call the xen
|
||||||
|
* specific function.
|
||||||
|
*/
|
||||||
|
if (pfn_valid(pfn))
|
||||||
|
dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
|
||||||
|
else
|
||||||
|
__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
|
||||||
|
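The header above routes every DMA operation through a cheap locality test: pfn_valid() is true only for RAM that dom0 itself owns, so local pages take the direct-mapping fast path and foreign grant mappings fall back to the Xen-specific helpers. A minimal, self-contained sketch of the same dispatch idea (hypothetical names, not the kernel's API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for pfn_valid() and the two backends. */
	static bool pfn_is_local(unsigned long pfn) { return pfn < 0x100000; }
	static void sync_direct(unsigned long pfn)  { printf("direct sync %lx\n", pfn); }
	static void sync_foreign(unsigned long pfn) { printf("xen sync %lx\n", pfn); }

	/* Dispatch on page locality, mirroring xen_dma_sync_single_for_cpu(). */
	static void sync_single(unsigned long pfn)
	{
		if (pfn_is_local(pfn))
			sync_direct(pfn);	/* dom0-owned RAM: native fast path */
		else
			sync_foreign(pfn);	/* foreign grant mapping: Xen helper */
	}

	int main(void)
	{
		sync_single(0x1000);	/* local */
		sync_single(0x200000);	/* foreign */
		return 0;
	}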
@@ -466,9 +466,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
 
 #ifdef CONFIG_XEN
-	if (xen_initial_domain()) {
-		dev->archdata.dev_dma_ops = dev->dma_ops;
+	if (xen_initial_domain())
 		dev->dma_ops = xen_dma_ops;
-	}
 #endif
 }
@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	atomic_set(&mm->context.flush_count, 0);
 	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
-	mm->context.compat_mm = 0;
+	mm->context.compat_mm = test_thread_flag(TIF_31BIT);
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste ||
 		test_thread_flag(TIF_PGSTE) ||
@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
-	if (prev == next)
-		return;
 	S390_lowcore.user_asce = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 	/* Clear previous user-ASCE from CR1 and CR7 */
@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__ctl_load(S390_lowcore.vdso_asce, 7, 7);
 		clear_cpu_flag(CIF_ASCE_SECONDARY);
 	}
-	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	if (prev != next)
+		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
 	if (stsi(vmms, 3, 2, 2) || !vmms->count)
 		return;
 
-	/* Running under KVM? If not we assume z/VM */
+	/* Detect known hypervisors */
 	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
-	else
+	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
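The magic byte strings are the hypervisor's control-program identifier in EBCDIC: 0xd2 0xe5 0xd4 spells "KVM" and 0xa9 0x61 0xe5 0xd4 spells "z/VM". A small host-side sketch (hypothetical helper, not kernel code) that decodes exactly those bytes:

	#include <stdio.h>

	/* Minimal EBCDIC-to-ASCII lookup for the bytes used above. */
	static char ebcdic_char(unsigned char c)
	{
		switch (c) {
		case 0xd2: return 'K';
		case 0xe5: return 'V';
		case 0xd4: return 'M';
		case 0xa9: return 'z';
		case 0x61: return '/';
		default:   return '?';
		}
	}

	int main(void)
	{
		const unsigned char kvm[] = { 0xd2, 0xe5, 0xd4 };
		const unsigned char zvm[] = { 0xa9, 0x61, 0xe5, 0xd4 };

		for (size_t i = 0; i < sizeof(kvm); i++)
			putchar(ebcdic_char(kvm[i]));
		putchar('\n');
		for (size_t i = 0; i < sizeof(zvm); i++)
			putchar(ebcdic_char(zvm[i]));
		putchar('\n');
		return 0;	/* prints "KVM" and "z/VM" */
	}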
@@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p)
 		pr_info("Linux is running under KVM in 64-bit mode\n");
 	else if (MACHINE_IS_LPAR)
 		pr_info("Linux is running natively in 64-bit mode\n");
+	else
+		pr_info("Linux is running as a guest in 64-bit mode\n");
 
 	/* Have one command line that is parsed and saved in /proc/cmdline */
 	/* boot_command_line has been already set up in early.c */
@@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
  */
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
+	struct lowcore *lc = pcpu_devices->lowcore;
+
+	if (pcpu_devices[0].address == stap())
+		lc = &S390_lowcore;
+
 	pcpu_delegate(&pcpu_devices[0], func, data,
-		      pcpu_devices->lowcore->nodat_stack);
+		      lc->nodat_stack);
 }
 
 int smp_find_processor_id(u16 address)
@@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
 {
 	int rc;
 
+	rc = lock_device_hotplug_sysfs();
+	if (rc)
+		return rc;
 	rc = smp_rescan_cpus();
+	unlock_device_hotplug();
 	return rc ? rc : count;
 }
 static DEVICE_ATTR_WO(rescan);
@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
 	vdso_pages = vdso64_pages;
 #ifdef CONFIG_COMPAT
-	if (is_compat_task()) {
+	mm->context.compat_mm = is_compat_task();
+	if (mm->context.compat_mm)
 		vdso_pages = vdso32_pages;
-		mm->context.compat_mm = 1;
-	}
 #endif
 	/*
 	 * vDSO has a problem and was disabled, just don't "enable" it for
@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
 #else
 	u64 ipi_bitmap = 0;
 #endif
+	long ret;
 
 	if (cpumask_empty(mask))
 		return;
@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
 		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
 			max = apic_id < max ? max : apic_id;
 		} else {
-			kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
 			min = max = apic_id;
 			ipi_bitmap = 0;
 		}
@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
 	}
 
 	if (ipi_bitmap) {
-		kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
 	}
 
 	local_irq_restore(flags);
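Besides surfacing the hypercall's return value, this function batches targets: APIC IDs are encoded as a 128-bit bitmap (two unsigned longs) relative to a base, and once an ID falls outside the window the batch is flushed and a new one starts. A simplified standalone sketch of that windowed-bitmap batching, assuming IDs arrive in ascending order and with a hypothetical flush function standing in for the hypercall:

	#include <stdint.h>
	#include <stdio.h>

	#define CLUSTER_SIZE 128	/* bits representable per batch */

	/* Hypothetical stand-in for kvm_hypercall4(KVM_HC_SEND_IPI, ...). */
	static void flush_batch(uint64_t lo, uint64_t hi, int min)
	{
		printf("send: base=%d lo=%#llx hi=%#llx\n", min,
		       (unsigned long long)lo, (unsigned long long)hi);
	}

	static void send_ipis(const int *ids, int n)
	{
		uint64_t lo = 0, hi = 0;
		int min = 0, have = 0;

		for (int i = 0; i < n; i++) {
			int id = ids[i];

			if (!have) {
				min = id;
				have = 1;
			} else if (id >= min + CLUSTER_SIZE) {
				flush_batch(lo, hi, min);	/* window overflow */
				lo = hi = 0;
				min = id;
			}
			int bit = id - min;
			if (bit < 64)
				lo |= 1ULL << bit;
			else
				hi |= 1ULL << (bit - 64);
		}
		if (lo || hi)
			flush_batch(lo, hi, min);
	}

	int main(void)
	{
		int ids[] = { 1, 3, 70, 300 };	/* 300 forces a second batch */
		send_ipis(ids, 4);
		return 0;
	}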
@@ -2,10 +2,6 @@
 
 ccflags-y += -Iarch/x86/kvm
 
-CFLAGS_x86.o := -I.
-CFLAGS_svm.o := -I.
-CFLAGS_vmx.o := -I.
-
 KVM := ../../../virt/kvm
 
 kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
@@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
 		if (ret != HV_STATUS_INVALID_PORT_ID)
 			break;
-		/* maybe userspace knows this conn_id: fall through */
+		/* fall through - maybe userspace knows this conn_id. */
 	case HVCALL_POST_MESSAGE:
 		/* don't bother userspace if it has no way to handle it */
 		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 			ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
 			ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
 			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
-			ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
 			ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
 			ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
 
@@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 		case HYPERV_CPUID_ENLIGHTMENT_INFO:
 			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
 			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
-			ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
 			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
 			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
 			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
-			ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
+			if (evmcs_ver)
+				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
 
 			/*
 			 * Default number of spinlock retry attempts, matches
@@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	switch (delivery_mode) {
 	case APIC_DM_LOWEST:
 		vcpu->arch.apic_arb_prio++;
+		/* fall through */
 	case APIC_DM_FIXED:
 		if (unlikely(trig_mode && !level))
 			break;
@@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 	case APIC_LVT0:
 		apic_manage_nmi_watchdog(apic, val);
+		/* fall through */
 	case APIC_LVTTHMR:
 	case APIC_LVTPC:
 	case APIC_LVT1:
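These annotation-only hunks exist because GCC's -Wimplicit-fallthrough warning is silenced only when a case that falls into the next one ends with a comment matching the compiler's fall-through regex. A compilable sketch of the annotated pattern:

	#include <stdio.h>

	/* Build with: gcc -Wimplicit-fallthrough -c fallthrough.c */
	static void classify(int mode)
	{
		switch (mode) {
		case 0:
			printf("preprocessing\n");	/* no break: drops into case 1 */
			/* fall through */
		case 1:
			printf("common path\n");
			break;
		default:
			printf("other\n");
			break;
		}
	}

	int main(void)
	{
		classify(0);	/* prints both lines */
		classify(1);	/* prints only the common path */
		return 0;
	}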
@@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			rsvd_bits(maxphyaddr, 51);
 		rsvd_check->rsvd_bits_mask[1][4] =
 			rsvd_check->rsvd_bits_mask[0][4];
+		/* fall through */
 	case PT64_ROOT_4LEVEL:
 		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
@@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	kvm_mmu_reset_context(&svm->vcpu);
 	kvm_mmu_load(&svm->vcpu);
 
+	/*
+	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
+	 * doesn't end up in L1.
+	 */
+	svm->vcpu.arch.nmi_injected = false;
+	kvm_clear_exception_queue(&svm->vcpu);
+	kvm_clear_interrupt_queue(&svm->vcpu);
+
 	return 0;
 }
@@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
-		/* Follow through */
+		/* Fall through */
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
@@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
 		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
-		int i;
-		struct kvm_vcpu *vcpu;
-		struct kvm *kvm = svm->vcpu.kvm;
 		struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
 		/*
-		 * At this point, we expect that the AVIC HW has already
-		 * set the appropriate IRR bits on the valid target
-		 * vcpus. So, we just need to kick the appropriate vcpu.
+		 * Update ICR high and low, then emulate sending IPI,
+		 * which is handled when writing APIC_ICR.
 		 */
-		kvm_for_each_vcpu(i, vcpu, kvm) {
-			bool m = kvm_apic_match_dest(vcpu, apic,
-						     icrl & KVM_APIC_SHORT_MASK,
-						     GET_APIC_DEST_FIELD(icrh),
-						     icrl & KVM_APIC_DEST_MASK);
-
-			if (m && !avic_vcpu_is_running(vcpu))
-				kvm_vcpu_wake_up(vcpu);
-		}
+		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	}
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
+		WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
+			  index, svm->vcpu.vcpu_id, icrh, icrl);
 		break;
 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
 		WARN_ONCE(1, "Invalid backing page\n");
@@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex,
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/x86/kvm
+#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
@@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			uint16_t *vmcs_version)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
+
+	vmx->nested.enlightened_vmcs_enabled = true;
 
 	if (vmcs_version)
 		*vmcs_version = nested_get_evmcs_version(vcpu);
 
 	/* We don't support disabling the feature for simplicity. */
-	if (vmx->nested.enlightened_vmcs_enabled)
+	if (evmcs_already_enabled)
 		return 0;
 
-	vmx->nested.enlightened_vmcs_enabled = true;
-
 	vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
 	vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
 	vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
@@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = {
 static int max_shadow_read_write_fields =
 	ARRAY_SIZE(shadow_read_write_fields);
 
-void init_vmcs_shadow_fields(void)
+static void init_vmcs_shadow_fields(void)
 {
 	int i, j;
@@ -4140,11 +4140,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 	if (r < 0)
 		goto out_vmcs02;
 
-	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+	vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_vmcs12)
 		goto out_cached_vmcs12;
 
-	vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+	vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_shadow_vmcs12)
 		goto out_cached_shadow_vmcs12;
@@ -5263,13 +5263,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 			copy_shadow_to_vmcs12(vmx);
 	}
 
-	if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+	/*
+	 * Copy over the full allocated size of vmcs12 rather than just the size
+	 * of the struct.
+	 */
+	if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
 		return -EFAULT;
 
 	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
 	    vmcs12->vmcs_link_pointer != -1ull) {
 		if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
-				 get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
 			return -EFAULT;
 	}
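These two hunks work as a pair: vmx_get_nested_state() now copies the full VMCS12_SIZE allocation to userspace, so enter_vmx_operation() must zero that allocation up front, otherwise the slack beyond sizeof(struct vmcs12) would leak uninitialized kernel heap. A minimal userspace sketch of the same rule (hypothetical sizes, memcpy standing in for copy_to_user):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define ALLOC_SIZE 4096			/* full buffer handed out */

	struct state { int fields[64]; };	/* sizeof() < ALLOC_SIZE */

	/* Hypothetical stand-in for copy_to_user(). */
	static void copy_out(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);
	}

	int main(void)
	{
		/* calloc (like kzalloc) zeroes the tail beyond struct state,
		 * so copying the full ALLOC_SIZE cannot expose stale heap data. */
		struct state *s = calloc(1, ALLOC_SIZE);
		char out[ALLOC_SIZE];

		if (!s)
			return 1;
		s->fields[0] = 42;
		copy_out(out, s, ALLOC_SIZE);	/* safe: tail is all zeroes */
		printf("first field: %d\n", ((struct state *)out)->fields[0]);
		free(s);
		return 0;
	}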
@@ -423,7 +423,7 @@ static void check_ept_pointer_match(struct kvm *kvm)
 	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
-int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
 		void *data)
 {
 	struct kvm_tlb_range *range = data;
@@ -1773,7 +1773,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
 			return 1;
-		/* Otherwise falls through */
+		/* Else, falls through */
 	default:
 		msr = find_msr_entry(vmx, msr_info->index);
 		if (msr) {
@@ -2014,7 +2014,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		/* Check reserved bit, higher 32 bits should be zero */
 		if ((data >> 32) != 0)
 			return 1;
-		/* Otherwise falls through */
+		/* Else, falls through */
 	default:
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
@@ -2344,7 +2344,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 		case 37: /* AAT100 */
 		case 44: /* BC86,AAY89,BD102 */
 		case 46: /* BA97 */
-			_vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+			_vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
 			_vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
 			pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
 				     "does not work properly. Using workaround\n");
@@ -6362,72 +6362,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long cr3, cr4, evmcs_rsp;
-
-	/* Record the guest's net vcpu time for enforced NMI injections. */
-	if (unlikely(!enable_vnmi &&
-		     vmx->loaded_vmcs->soft_vnmi_blocked))
-		vmx->loaded_vmcs->entry_time = ktime_get();
-
-	/* Don't enter VMX if guest state is invalid, let the exit handler
-	   start emulation until we arrive back to a valid state */
-	if (vmx->emulation_required)
-		return;
-
-	if (vmx->ple_window_dirty) {
-		vmx->ple_window_dirty = false;
-		vmcs_write32(PLE_WINDOW, vmx->ple_window);
-	}
-
-	if (vmx->nested.need_vmcs12_sync)
-		nested_sync_from_vmcs12(vcpu);
-
-	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
-		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
-		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
-	cr3 = __get_current_cr3_fast();
-	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-		vmcs_writel(HOST_CR3, cr3);
-		vmx->loaded_vmcs->host_state.cr3 = cr3;
-	}
-
-	cr4 = cr4_read_shadow();
-	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
-		vmcs_writel(HOST_CR4, cr4);
-		vmx->loaded_vmcs->host_state.cr4 = cr4;
-	}
-
-	/* When single-stepping over STI and MOV SS, we must clear the
-	 * corresponding interruptibility bits in the guest state. Otherwise
-	 * vmentry fails as it then expects bit 14 (BS) in pending debug
-	 * exceptions being set, but that's not correct for the guest debugging
-	 * case. */
-	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-		vmx_set_interrupt_shadow(vcpu, 0);
-
-	if (static_cpu_has(X86_FEATURE_PKU) &&
-	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
-	    vcpu->arch.pkru != vmx->host_pkru)
-		__write_pkru(vcpu->arch.pkru);
-
-	pt_guest_enter(vmx);
-
-	atomic_switch_perf_msrs(vmx);
-
-	vmx_update_hv_timer(vcpu);
-
-	/*
-	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
-	 * is no need to worry about the conditional branch over the wrmsr
-	 * being speculatively taken.
-	 */
-	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
+	unsigned long evmcs_rsp;
 	vmx->__launched = vmx->loaded_vmcs->launched;
 
@@ -6567,6 +6504,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		, "eax", "ebx", "edi"
 #endif
 	      );
+}
+STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
+
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	unsigned long cr3, cr4;
+
+	/* Record the guest's net vcpu time for enforced NMI injections. */
+	if (unlikely(!enable_vnmi &&
+		     vmx->loaded_vmcs->soft_vnmi_blocked))
+		vmx->loaded_vmcs->entry_time = ktime_get();
+
+	/* Don't enter VMX if guest state is invalid, let the exit handler
+	   start emulation until we arrive back to a valid state */
+	if (vmx->emulation_required)
+		return;
+
+	if (vmx->ple_window_dirty) {
+		vmx->ple_window_dirty = false;
+		vmcs_write32(PLE_WINDOW, vmx->ple_window);
+	}
+
+	if (vmx->nested.need_vmcs12_sync)
+		nested_sync_from_vmcs12(vcpu);
+
+	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
+	cr3 = __get_current_cr3_fast();
+	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+		vmcs_writel(HOST_CR3, cr3);
+		vmx->loaded_vmcs->host_state.cr3 = cr3;
+	}
+
+	cr4 = cr4_read_shadow();
+	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+		vmcs_writel(HOST_CR4, cr4);
+		vmx->loaded_vmcs->host_state.cr4 = cr4;
+	}
+
+	/* When single-stepping over STI and MOV SS, we must clear the
+	 * corresponding interruptibility bits in the guest state. Otherwise
+	 * vmentry fails as it then expects bit 14 (BS) in pending debug
+	 * exceptions being set, but that's not correct for the guest debugging
+	 * case. */
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		vmx_set_interrupt_shadow(vcpu, 0);
+
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+	    vcpu->arch.pkru != vmx->host_pkru)
+		__write_pkru(vcpu->arch.pkru);
+
+	pt_guest_enter(vmx);
+
+	atomic_switch_perf_msrs(vmx);
+
+	vmx_update_hv_timer(vcpu);
+
+	/*
+	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
+	 * is no need to worry about the conditional branch over the wrmsr
+	 * being speculatively taken.
+	 */
+	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+	__vmx_vcpu_run(vcpu, vmx);
+
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
 
@@ -6648,7 +6656,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 }
-STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static struct kvm *vmx_vm_alloc(void)
 {
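The refactor isolates the inline-asm VMLAUNCH/VMRESUME blob in __vmx_vcpu_run(), so only that leaf needs the STACK_FRAME_NON_STANDARD objtool opt-out, while all the ordinary C setup that had accumulated around it stays statically checkable. The shape of the split, as a compilable sketch with hypothetical names:

	#include <stdio.h>

	struct vcpu { int launched; };

	/* Leaf holding only the "unverifiable" part; in the kernel this is
	 * where the asm volatile guest-entry sequence lives and where the
	 * objtool annotation would be applied. */
	static void __run_guest(struct vcpu *v)
	{
		printf("enter guest (launched=%d)\n", v->launched);
		v->launched = 1;
	}

	/* Wrapper keeps the checkable C: state sync, MSR switching,
	 * timer programming, and so on. */
	static void run_vcpu(struct vcpu *v)
	{
		printf("sync host state\n");	/* cr3/cr4 caching etc. */
		__run_guest(v);
		printf("complete interrupts\n");
	}

	int main(void)
	{
		struct vcpu v = { 0 };
		run_vcpu(&v);
		return 0;
	}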
@@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 	case KVM_CAP_HYPERV_SYNIC2:
 		if (cap->args[0])
 			return -EINVAL;
+		/* fall through */
+
 	case KVM_CAP_HYPERV_SYNIC:
 		if (!irqchip_in_kernel(vcpu->kvm))
 			return -EINVAL;
@@ -6480,8 +6482,7 @@ restart:
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE &&
-		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+		if (r == EMULATE_DONE && ctxt->tf)
 			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -7093,10 +7094,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	case KVM_HC_CLOCK_PAIRING:
 		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
 		break;
+#endif
 	case KVM_HC_SEND_IPI:
 		ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
 		break;
-#endif
 	default:
 		ret = -KVM_ENOSYS;
 		break;
@@ -7937,6 +7938,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 			vcpu->arch.pv.pv_unhalted = false;
 			vcpu->arch.mp_state =
 				KVM_MP_STATE_RUNNABLE;
+			/* fall through */
 		case KVM_MP_STATE_RUNNABLE:
 			vcpu->arch.apf.halted = false;
 			break;
@@ -1083,7 +1083,18 @@ blk_qc_t generic_make_request(struct bio *bio)
 			/* Create a fresh bio_list for all subordinate requests */
 			bio_list_on_stack[1] = bio_list_on_stack[0];
 			bio_list_init(&bio_list_on_stack[0]);
+
+			/*
+			 * Since we're recursing into make_request here, ensure
+			 * that we mark this bio as already having entered the queue.
+			 * If not, and the queue is going away, we can get stuck
+			 * forever on waiting for the queue reference to drop. But
+			 * that will never happen, as we're already holding a
+			 * reference to it.
+			 */
+			bio_set_flag(bio, BIO_QUEUE_ENTERED);
 			ret = q->make_request_fn(q, bio);
+			bio_clear_flag(bio, BIO_QUEUE_ENTERED);
 
 			/* sort new bios into those for a lower level
 			 * and those for the same level
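The flag brackets exactly the window in which generic_make_request() re-enters the queue, telling the entry path to skip waiting because a reference is already held; moving it here (away from blk_queue_split(), see the next hunk) covers every recursion path rather than only splits. A generic sketch of the bracket-a-reentrant-call-with-a-flag pattern, with hypothetical types:

	#include <stdbool.h>
	#include <stdio.h>

	struct queue { int refs; };
	struct request { bool entered; };

	/* Hypothetical entry gate: a fresh request must take a reference,
	 * but a re-entrant one already holds it and must not block. */
	static void queue_enter(struct queue *q, struct request *rq)
	{
		if (rq->entered) {
			printf("re-entry: skip waiting, ref already held\n");
			return;
		}
		q->refs++;
		printf("first entry: took reference (%d)\n", q->refs);
	}

	static void submit(struct queue *q, struct request *rq, int depth)
	{
		queue_enter(q, rq);
		if (depth == 0) {
			rq->entered = true;	/* mark before recursing */
			submit(q, rq, depth + 1);
			rq->entered = false;	/* clear once recursion returns */
		}
	}

	int main(void)
	{
		struct queue q = { 0 };
		struct request rq = { false };

		submit(&q, &rq, 0);
		return 0;
	}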
@@ -272,16 +272,6 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
 		/* there isn't chance to merge the splitted bio */
 		split->bi_opf |= REQ_NOMERGE;
 
-		/*
-		 * Since we're recursing into make_request here, ensure
-		 * that we mark this bio as already having entered the queue.
-		 * If not, and the queue is going away, we can get stuck
-		 * forever on waiting for the queue reference to drop. But
-		 * that will never happen, as we're already holding a
-		 * reference to it.
-		 */
-		bio_set_flag(*bio, BIO_QUEUE_ENTERED);
-
 		bio_chain(split, *bio);
 		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
 		generic_make_request(*bio);
@@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = {
 	CMD_FLAG_NAME(PREFLUSH),
 	CMD_FLAG_NAME(RAHEAD),
 	CMD_FLAG_NAME(BACKGROUND),
-	CMD_FLAG_NAME(NOUNMAP),
 	CMD_FLAG_NAME(NOWAIT),
+	CMD_FLAG_NAME(NOUNMAP),
+	CMD_FLAG_NAME(HIPRI),
 };
 #undef CMD_FLAG_NAME
@@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
 	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
 }
 
-void wbt_issue(struct rq_qos *rqos, struct request *rq)
+static void wbt_issue(struct rq_qos *rqos, struct request *rq)
 {
 	struct rq_wb *rwb = RQWB(rqos);
 
@@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq)
 	}
 }
 
-void wbt_requeue(struct rq_qos *rqos, struct request *rq)
+static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
 {
 	struct rq_wb *rwb = RQWB(rqos);
 	if (!rwb_enabled(rwb))
@@ -409,6 +409,32 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
 	return true;
 }
 
+static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
+		struct nd_cmd_pkg *call_pkg)
+{
+	if (call_pkg) {
+		int i;
+
+		if (nfit_mem->family != call_pkg->nd_family)
+			return -ENOTTY;
+
+		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+			if (call_pkg->nd_reserved2[i])
+				return -EINVAL;
+		return call_pkg->nd_command;
+	}
+
+	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
+	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+		return cmd;
+
+	/*
+	 * Force function number validation to fail since 0 is never
+	 * published as a valid function in dsm_mask.
+	 */
+	return 0;
+}
+
 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
 {
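cmd_to_func() centralizes a common ioctl-style defense: reject any request whose reserved fields are non-zero, so those bytes can later be given meaning without breaking old callers. A standalone sketch of the check, using a hypothetical packet layout:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical command packet with space reserved for future use. */
	struct cmd_pkg {
		unsigned int family;
		unsigned int command;
		unsigned char reserved[7];
	};

	static int validate_pkg(const struct cmd_pkg *pkg, unsigned int family)
	{
		if (pkg->family != family)
			return -ENOTTY;		/* wrong command family */
		for (size_t i = 0; i < sizeof(pkg->reserved); i++)
			if (pkg->reserved[i])
				return -EINVAL;	/* reserved bytes must be zero */
		return (int)pkg->command;
	}

	int main(void)
	{
		struct cmd_pkg pkg;

		memset(&pkg, 0, sizeof(pkg));
		pkg.family = 1;
		pkg.command = 4;
		printf("func = %d\n", validate_pkg(&pkg, 1));	/* 4 */
		pkg.reserved[2] = 0xff;
		printf("func = %d\n", validate_pkg(&pkg, 1));	/* -EINVAL */
		return 0;
	}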
@@ -422,30 +448,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	unsigned long cmd_mask, dsm_mask;
 	u32 offset, fw_status = 0;
 	acpi_handle handle;
-	unsigned int func;
 	const guid_t *guid;
-	int rc, i;
+	int func, rc, i;
 
 	if (cmd_rc)
 		*cmd_rc = -EINVAL;
-	func = cmd;
-	if (cmd == ND_CMD_CALL) {
-		call_pkg = buf;
-		func = call_pkg->nd_command;
-
-		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
-			if (call_pkg->nd_reserved2[i])
-				return -EINVAL;
-	}
 
 	if (nvdimm) {
 		struct acpi_device *adev = nfit_mem->adev;
 
 		if (!adev)
 			return -ENOTTY;
-		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
-			return -ENOTTY;
 
+		if (cmd == ND_CMD_CALL)
+			call_pkg = buf;
+		func = cmd_to_func(nfit_mem, cmd, call_pkg);
+		if (func < 0)
+			return func;
 		dimm_name = nvdimm_name(nvdimm);
 		cmd_name = nvdimm_cmd_name(cmd);
 		cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -456,6 +475,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	} else {
 		struct acpi_device *adev = to_acpi_dev(acpi_desc);
 
+		func = cmd;
 		cmd_name = nvdimm_bus_cmd_name(cmd);
 		cmd_mask = nd_desc->cmd_mask;
 		dsm_mask = cmd_mask;
@@ -470,7 +490,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
 		return -ENOTTY;
 
-	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
+	/*
+	 * Check for a valid command.  For ND_CMD_CALL, we also have to
+	 * make sure that the DSM function is supported.
+	 */
+	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+		return -ENOTTY;
+	else if (!test_bit(cmd, &cmd_mask))
 		return -ENOTTY;
 
 	in_obj.type = ACPI_TYPE_PACKAGE;
@@ -1867,6 +1893,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 		return 0;
 	}
 
+	/*
+	 * Function 0 is the command interrogation function, don't
+	 * export it to potential userspace use, and enable it to be
+	 * used as an error value in acpi_nfit_ctl().
+	 */
+	dsm_mask &= ~1UL;
+
 	guid = to_nfit_uuid(nfit_mem->family);
 	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
 		if (acpi_check_dsm(adev_dimm->handle, guid,
@@ -2042,11 +2075,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 		if (!nvdimm)
 			continue;
 
-		rc = nvdimm_security_setup_events(nvdimm);
-		if (rc < 0)
-			dev_warn(acpi_desc->dev,
-				"security event setup failed: %d\n", rc);
-
 		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
 		if (nfit_kernfs)
 			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
@@ -11,6 +11,7 @@
 #include <linux/kdev_t.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/namei.h>
 #include <linux/magic.h>
 #include <linux/major.h>
 #include <linux/miscdevice.h>
@@ -20,6 +21,7 @@
 #include <linux/parser.h>
 #include <linux/radix-tree.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock_types.h>
 #include <linux/stddef.h>
@@ -30,7 +32,7 @@
 #include <linux/xarray.h>
 #include <uapi/asm-generic/errno-base.h>
 #include <uapi/linux/android/binder.h>
-#include <uapi/linux/android/binder_ctl.h>
+#include <uapi/linux/android/binderfs.h>
 
 #include "binder_internal.h"
@@ -39,13 +41,31 @@
 #define INODE_OFFSET 3
 #define INTSTRLEN 21
 #define BINDERFS_MAX_MINOR (1U << MINORBITS)
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
 
-static struct vfsmount *binderfs_mnt;
-
 static dev_t binderfs_dev;
 static DEFINE_MUTEX(binderfs_minors_mutex);
 static DEFINE_IDA(binderfs_minors);
 
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ */
+struct binderfs_mount_opts {
+	int max;
+};
+
+enum {
+	Opt_max,
+	Opt_err
+};
+
+static const match_table_t tokens = {
+	{ Opt_max, "max=%d" },
+	{ Opt_err, NULL     }
+};
+
 /**
  * binderfs_info - information about a binderfs mount
  * @ipc_ns: The ipc namespace the binderfs mount belongs to.
@@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors);
  *              created.
  * @root_gid:   gid that needs to be used when a new binder device is
  *              created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
  */
 struct binderfs_info {
 	struct ipc_namespace *ipc_ns;
 	struct dentry *control_dentry;
 	kuid_t root_uid;
 	kgid_t root_gid;
+	struct binderfs_mount_opts mount_opts;
+	int device_count;
 };
 
 static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
@@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode)
  * @userp:     buffer to copy information about new device for userspace to
  * @req:       struct binderfs_device as copied from userspace
  *
- * This function allocated a new binder_device and reserves a new minor
+ * This function allocates a new binder_device and reserves a new minor
  * number for it.
  * Minor numbers are limited and tracked globally in binderfs_minors. The
 * function will stash a struct binder_device for the specific binder
@@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 					 struct binderfs_device *req)
 {
 	int minor, ret;
-	struct dentry *dentry, *dup, *root;
+	struct dentry *dentry, *root;
 	struct binder_device *device;
-	size_t name_len = BINDERFS_MAX_NAME + 1;
 	char *name = NULL;
+	size_t name_len;
 	struct inode *inode = NULL;
 	struct super_block *sb = ref_inode->i_sb;
 	struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+	bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+	bool use_reserve = true;
+#endif
 
 	/* Reserve new minor number for the new device. */
 	mutex_lock(&binderfs_minors_mutex);
-	minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
-	mutex_unlock(&binderfs_minors_mutex);
-	if (minor < 0)
+	if (++info->device_count <= info->mount_opts.max)
+		minor = ida_alloc_max(&binderfs_minors,
+				      use_reserve ? BINDERFS_MAX_MINOR :
+						    BINDERFS_MAX_MINOR_CAPPED,
+				      GFP_KERNEL);
+	else
+		minor = -ENOSPC;
+	if (minor < 0) {
+		--info->device_count;
+		mutex_unlock(&binderfs_minors_mutex);
 		return minor;
+	}
+	mutex_unlock(&binderfs_minors_mutex);
 
 	ret = -ENOMEM;
 	device = kzalloc(sizeof(*device), GFP_KERNEL);
@@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 	inode->i_uid = info->root_uid;
 	inode->i_gid = info->root_gid;
 
-	name = kmalloc(name_len, GFP_KERNEL);
+	req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+	name_len = strlen(req->name);
+	/* Make sure to include terminating NUL byte */
+	name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
 	if (!name)
 		goto err;
 
-	strscpy(name, req->name, name_len);
-
 	device->binderfs_inode = inode;
 	device->context.binder_context_mgr_uid = INVALID_UID;
 	device->context.name = name;
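The rewrite first forces a NUL at the end of the userspace-supplied buffer, then duplicates exactly strlen+1 bytes with kmemdup(), instead of allocating a worst-case buffer and strscpy()-ing into it. The same bounded-copy idiom in plain C, with a hypothetical size cap and malloc/memcpy standing in for kmemdup:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define MAX_NAME 255	/* hypothetical cap, like BINDERFS_MAX_NAME */

	/* Duplicate an untrusted, possibly unterminated name safely. */
	static char *dup_name(char buf[MAX_NAME + 1])
	{
		buf[MAX_NAME] = '\0';		/* force termination first */
		size_t len = strlen(buf);	/* now bounded by MAX_NAME */
		char *name = malloc(len + 1);	/* include the NUL byte */

		if (name)
			memcpy(name, buf, len + 1);
		return name;
	}

	int main(void)
	{
		char raw[MAX_NAME + 1];
		memset(raw, 'x', sizeof(raw));	/* deliberately unterminated */

		char *name = dup_name(raw);
		if (name) {
			printf("len=%zu\n", strlen(name));	/* 255 */
			free(name);
		}
		return 0;
	}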
@@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 
 	root = sb->s_root;
 	inode_lock(d_inode(root));
-	dentry = d_alloc_name(root, name);
-	if (!dentry) {
+
+	/* look it up */
+	dentry = lookup_one_len(name, root, name_len);
+	if (IS_ERR(dentry)) {
 		inode_unlock(d_inode(root));
-		ret = -ENOMEM;
+		ret = PTR_ERR(dentry);
 		goto err;
 	}
 
-	/* Verify that the name userspace gave us is not already in use. */
-	dup = d_lookup(root, &dentry->d_name);
-	if (dup) {
-		if (d_really_is_positive(dup)) {
-			inode_unlock(d_inode(root));
-			dput(dup);
-			dput(dentry);
-			ret = -EEXIST;
-			goto err;
-		}
-		dput(dup);
+	if (d_really_is_positive(dentry)) {
+		/* already exists */
+		dput(dentry);
+		inode_unlock(d_inode(root));
+		ret = -EEXIST;
+		goto err;
 	}
 
 	inode->i_private = device;
-	d_add(dentry, inode);
+	d_instantiate(dentry, inode);
 	fsnotify_create(root->d_inode, dentry);
 	inode_unlock(d_inode(root));
@@ -187,6 +222,7 @@ err:
 	kfree(name);
 	kfree(device);
 	mutex_lock(&binderfs_minors_mutex);
+	--info->device_count;
 	ida_free(&binderfs_minors, minor);
 	mutex_unlock(&binderfs_minors_mutex);
 	iput(inode);
@@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
 static void binderfs_evict_inode(struct inode *inode)
 {
 	struct binder_device *device = inode->i_private;
+	struct binderfs_info *info = BINDERFS_I(inode);
 
 	clear_inode(inode);
 
@@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode)
 		return;
 
 	mutex_lock(&binderfs_minors_mutex);
+	--info->device_count;
 	ida_free(&binderfs_minors, device->miscdev.minor);
 	mutex_unlock(&binderfs_minors_mutex);
@@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode)
 	kfree(device);
 }
 
+/**
+ * binderfs_parse_mount_opts - parse binderfs mount options
+ * @data: options to set (can be NULL in which case defaults are used)
+ */
+static int binderfs_parse_mount_opts(char *data,
+				     struct binderfs_mount_opts *opts)
+{
+	char *p;
+	opts->max = BINDERFS_MAX_MINOR;
+
+	while ((p = strsep(&data, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		int max_devices;
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_max:
+			if (match_int(&args[0], &max_devices) ||
+			    (max_devices < 0 ||
+			     (max_devices > BINDERFS_MAX_MINOR)))
+				return -EINVAL;
+
+			opts->max = max_devices;
+			break;
+		default:
+			pr_err("Invalid mount options\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int binderfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct binderfs_info *info = sb->s_fs_info;
+	return binderfs_parse_mount_opts(data, &info->mount_opts);
+}
+
+static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
+{
+	struct binderfs_info *info;
+
+	info = root->d_sb->s_fs_info;
+	if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+		seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+	return 0;
+}
+
 static const struct super_operations binderfs_super_ops = {
-	.statfs = simple_statfs,
-	.evict_inode = binderfs_evict_inode,
+	.evict_inode    = binderfs_evict_inode,
+	.remount_fs     = binderfs_remount,
+	.show_options   = binderfs_show_mount_opts,
+	.statfs         = simple_statfs,
 };
 
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+	struct binderfs_info *info = dentry->d_sb->s_fs_info;
+	return info->control_dentry == dentry;
+}
+
 static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			   struct inode *new_dir, struct dentry *new_dentry,
 			   unsigned int flags)
 {
-	struct inode *inode = d_inode(old_dentry);
-
-	/* binderfs doesn't support directories. */
-	if (d_is_dir(old_dentry))
+	if (is_binderfs_control_device(old_dentry) ||
+	    is_binderfs_control_device(new_dentry))
 		return -EPERM;
 
-	if (flags & ~RENAME_NOREPLACE)
-		return -EINVAL;
-
-	if (!simple_empty(new_dentry))
-		return -ENOTEMPTY;
-
-	if (d_really_is_positive(new_dentry))
-		simple_unlink(new_dir, new_dentry);
-
-	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
-		new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
-
-	return 0;
+	return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
 }
 
 static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
 {
-	/*
-	 * The control dentry is only ever touched during mount so checking it
-	 * here should not require us to take lock.
-	 */
-	if (BINDERFS_I(dir)->control_dentry == dentry)
+	if (is_binderfs_control_device(dentry))
 		return -EPERM;
 
 	return simple_unlink(dir, dentry);
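With the parser wired into mount, remount, and show_options, a binderfs instance can be created with a per-mount device quota. A hedged userspace sketch (the mountpoint path is just an example):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Mount a binderfs instance allowing at most four binder
		 * devices; "max=4" reaches binderfs_parse_mount_opts() as
		 * the data string. Requires CAP_SYS_ADMIN. */
		if (mount("binder", "/dev/binderfs", "binder", 0, "max=4")) {
			perror("mount");
			return 1;
		}
		return 0;
	}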
@@ -318,8 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
 	if (!device)
 		return -ENOMEM;
 
-	inode_lock(d_inode(root));
-
 	/* If we have already created a binder-control node, return. */
 	if (info->control_dentry) {
 		ret = 0;
 
@@ -358,12 +438,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
 	inode->i_private = device;
 	info->control_dentry = dentry;
 	d_add(dentry, inode);
-	inode_unlock(d_inode(root));
 
 	return 0;
 
 out:
-	inode_unlock(d_inode(root));
 	kfree(device);
 	iput(inode);
@@ -378,12 +456,9 @@ static const struct inode_operations binderfs_dir_inode_operations = {
 
 static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 {
+	int ret;
 	struct binderfs_info *info;
-	int ret = -ENOMEM;
 	struct inode *inode = NULL;
-	struct ipc_namespace *ipc_ns = sb->s_fs_info;
-
-	get_ipc_ns(ipc_ns);
 
 	sb->s_blocksize = PAGE_SIZE;
 	sb->s_blocksize_bits = PAGE_SHIFT;
 
@@ -405,11 +480,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_op = &binderfs_super_ops;
 	sb->s_time_gran = 1;
 
-	info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
-	if (!info)
-		goto err_without_dentry;
+	sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+	if (!sb->s_fs_info)
+		return -ENOMEM;
+	info = sb->s_fs_info;
+
+	info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+	ret = binderfs_parse_mount_opts(data, &info->mount_opts);
+	if (ret)
+		return ret;
 
-	info->ipc_ns = ipc_ns;
 	info->root_gid = make_kgid(sb->s_user_ns, 0);
 	if (!gid_valid(info->root_gid))
 		info->root_gid = GLOBAL_ROOT_GID;
 
@@ -417,11 +498,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!uid_valid(info->root_uid))
 		info->root_uid = GLOBAL_ROOT_UID;
 
-	sb->s_fs_info = info;
-
 	inode = new_inode(sb);
 	if (!inode)
-		goto err_without_dentry;
+		return -ENOMEM;
 
 	inode->i_ino = FIRST_INODE;
 	inode->i_fop = &simple_dir_operations;
 
@@ -432,79 +511,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 
 	sb->s_root = d_make_root(inode);
 	if (!sb->s_root)
-		goto err_without_dentry;
+		return -ENOMEM;
 
-	ret = binderfs_binder_ctl_create(sb);
-	if (ret)
-		goto err_with_dentry;
-
-	return 0;
-
-err_with_dentry:
-	dput(sb->s_root);
-	sb->s_root = NULL;
-
-err_without_dentry:
-	put_ipc_ns(ipc_ns);
-	iput(inode);
-	kfree(info);
-
-	return ret;
-}
-
-static int binderfs_test_super(struct super_block *sb, void *data)
-{
-	struct binderfs_info *info = sb->s_fs_info;
-
-	if (info)
-		return info->ipc_ns == data;
-
-	return 0;
-}
-
-static int binderfs_set_super(struct super_block *sb, void *data)
-{
-	sb->s_fs_info = data;
-	return set_anon_super(sb, NULL);
-}
+	return binderfs_binder_ctl_create(sb);
+}
 
 static struct dentry *binderfs_mount(struct file_system_type *fs_type,
 				     int flags, const char *dev_name,
 				     void *data)
 {
-	struct super_block *sb;
-	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
-
-	if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
-		return ERR_PTR(-EPERM);
-
-	sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
-			 flags, ipc_ns->user_ns, ipc_ns);
-	if (IS_ERR(sb))
-		return ERR_CAST(sb);
-
-	if (!sb->s_root) {
-		int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
-		if (ret) {
-			deactivate_locked_super(sb);
-			return ERR_PTR(ret);
-		}
-
-		sb->s_flags |= SB_ACTIVE;
-	}
-
-	return dget(sb->s_root);
+	return mount_nodev(fs_type, flags, data, binderfs_fill_super);
 }
 
 static void binderfs_kill_super(struct super_block *sb)
 {
 	struct binderfs_info *info = sb->s_fs_info;
 
+	kill_litter_super(sb);
+
 	if (info && info->ipc_ns)
 		put_ipc_ns(info->ipc_ns);
 
 	kfree(info);
-	kill_litter_super(sb);
 }
 
 static struct file_system_type binder_fs_type = {
||||||
|
@ -530,14 +558,6 @@ static int __init init_binderfs(void)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
binderfs_mnt = kern_mount(&binder_fs_type);
|
|
||||||
if (IS_ERR(binderfs_mnt)) {
|
|
||||||
ret = PTR_ERR(binderfs_mnt);
|
|
||||||
binderfs_mnt = NULL;
|
|
||||||
unregister_filesystem(&binder_fs_type);
|
|
||||||
unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = {
 	.sg_tablesize		= MAX_DCMDS,
 	/* We may not need that strict one */
 	.dma_boundary		= ATA_DMA_BOUNDARY,
+	/* Not sure what the real max is but we know it's less than 64K, let's
+	 * use 64K minus 256
+	 */
+	.max_segment_size	= MAX_DBDMA_SEG,
 	.slave_configure	= pata_macio_slave_config,
 };
 
@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv,
 	/* Make sure we have sane initial timings in the cache */
 	pata_macio_default_timings(priv);
 
-	/* Not sure what the real max is but we know it's less than 64K, let's
-	 * use 64K minus 256
-	 */
-	dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
-
 	/* Allocate libata host for 1 port */
 	memset(&pinfo, 0, sizeof(struct ata_port_info));
 	pmac_macio_calc_timing_masks(priv, &pinfo);
@ -245,8 +245,15 @@ struct inic_port_priv {
 
 static struct scsi_host_template inic_sht = {
 	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize	= LIBATA_MAX_PRD,	/* maybe it can be larger? */
-	.dma_boundary	= INIC_DMA_BOUNDARY,
+
+	/*
+	 * This controller is braindamaged.  dma_boundary is 0xffff like others
+	 * but it will lock up the whole machine HARD if 65536 byte PRD entry
+	 * is fed.  Reduce maximum segment size.
+	 */
+	.dma_boundary	= INIC_DMA_BOUNDARY,
+	.max_segment_size	= 65536 - 512,
 };
 
 static const int scr_map[] = {
@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	}
 
-	/*
-	 * This controller is braindamaged.  dma_boundary is 0xffff
-	 * like others but it will lock up the whole machine HARD if
-	 * 65536 byte PRD entry is fed. Reduce maximum segment size.
-	 */
-	rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to set the maximum segment size\n");
-		return rc;
-	}
-
 	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
 	if (rc) {
 		dev_err(&pdev->dev, "failed to initialize controller\n");
@ -32,6 +32,7 @@
 #include <linux/moduleparam.h>
 #include <linux/workqueue.h>
 #include <linux/uuid.h>
+#include <linux/nospec.h>
 
 #define IPMI_DRIVER_VERSION "39.2"
 
@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
 { }
 #endif
 
-static int initialized;
+static bool initialized;
+static bool drvregistered;
 
 enum ipmi_panic_event_op {
 	IPMI_SEND_PANIC_EVENT_NONE,
@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
 
 static LIST_HEAD(ipmi_interfaces);
 static DEFINE_MUTEX(ipmi_interfaces_mutex);
-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
+struct srcu_struct ipmi_interfaces_srcu;
 
 /*
  * List of watchers that want to know when smi's are added and deleted.
@ -720,7 +722,15 @@ struct watcher_entry {
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
 	struct ipmi_smi *intf;
-	int index;
+	int index, rv;
+
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	mutex_lock(&smi_watchers_mutex);
 
@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
 
 	if (user) {
 		user->handler->ipmi_recv_hndl(msg, user->handler_data);
-		release_ipmi_user(msg->user, index);
+		release_ipmi_user(user, index);
 	} else {
 		/* User went away, give up. */
 		ipmi_free_recv_msg(msg);
@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num,
 {
 	unsigned long flags;
 	struct ipmi_user *new_user;
-	int rv = 0, index;
+	int rv, index;
 	struct ipmi_smi *intf;
 
 	/*
@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num,
 	 * Make sure the driver is actually initialized, this handles
 	 * problems with initialization order.
 	 */
-	if (!initialized) {
-		rv = ipmi_init_msghandler();
-		if (rv)
-			return rv;
-
-		/*
-		 * The init code doesn't return an error if it was turned
-		 * off, but it won't initialize.  Check that.
-		 */
-		if (!initialized)
-			return -ENODEV;
-	}
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
 	if (!new_user)
@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+	cleanup_srcu_struct(&user->release_barrier);
 	kfree(user);
 }
 
@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
 {
 	_ipmi_destroy_user(user);
 
-	cleanup_srcu_struct(&user->release_barrier);
 	kref_put(&user->refcount, free_user);
 
 	return 0;
@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		user->intf->addrinfo[channel].address = address;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		*address = user->intf->addrinfo[channel].address;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		user->intf->addrinfo[channel].lun = LUN & 0x3;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		*address = user->intf->addrinfo[channel].lun;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
 {
 	if (addr->channel >= IPMI_MAX_CHANNELS)
 		return -EINVAL;
+	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
 	*lun = intf->addrinfo[addr->channel].lun;
 	*saddr = intf->addrinfo[addr->channel].address;
 	return 0;
@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 	 * Make sure the driver is actually initialized, this handles
 	 * problems with initialization order.
 	 */
-	if (!initialized) {
-		rv = ipmi_init_msghandler();
-		if (rv)
-			return rv;
-		/*
-		 * The init code doesn't return an error if it was turned
-		 * off, but it won't initialize.  Check that.
-		 */
-		if (!initialized)
-			return -ENODEV;
-	}
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 	if (!intf)
@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
 
+/* Must be called with ipmi_interfaces_mutex held. */
+static int ipmi_register_driver(void)
+{
+	int rv;
+
+	if (drvregistered)
+		return 0;
+
+	rv = driver_register(&ipmidriver.driver);
+	if (rv)
+		pr_err("Could not register IPMI driver\n");
+	else
+		drvregistered = true;
+	return rv;
+}
+
 static struct notifier_block panic_block = {
 	.notifier_call	= panic_event,
 	.next		= NULL,
@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void)
 {
 	int rv;
 
+	mutex_lock(&ipmi_interfaces_mutex);
+	rv = ipmi_register_driver();
+	if (rv)
+		goto out;
 	if (initialized)
-		return 0;
+		goto out;
 
-	rv = driver_register(&ipmidriver.driver);
-	if (rv) {
-		pr_err("Could not register IPMI driver\n");
-		return rv;
-	}
-
-	pr_info("version " IPMI_DRIVER_VERSION "\n");
+	init_srcu_struct(&ipmi_interfaces_srcu);
 
 	timer_setup(&ipmi_timer, ipmi_timeout, 0);
 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 
 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
-	initialized = 1;
+	initialized = true;
 
-	return 0;
+out:
+	mutex_unlock(&ipmi_interfaces_mutex);
+	return rv;
 }
 
 static int __init ipmi_init_msghandler_mod(void)
 {
-	ipmi_init_msghandler();
-	return 0;
+	int rv;
+
+	pr_info("version " IPMI_DRIVER_VERSION "\n");
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	rv = ipmi_register_driver();
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	return rv;
 }
 
 static void __exit cleanup_ipmi(void)
 {
 	int count;
 
-	if (!initialized)
-		return;
-
-	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
-
-	/*
-	 * This can't be called if any interfaces exist, so no worry
-	 * about shutting down the interfaces.
-	 */
-
-	/*
-	 * Tell the timer to stop, then wait for it to stop.  This
-	 * avoids problems with race conditions removing the timer
-	 * here.
-	 */
-	atomic_inc(&stop_operation);
-	del_timer_sync(&ipmi_timer);
+	if (initialized) {
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &panic_block);
 
-	driver_unregister(&ipmidriver.driver);
+		/*
+		 * This can't be called if any interfaces exist, so no worry
+		 * about shutting down the interfaces.
+		 */
 
-	initialized = 0;
+		/*
+		 * Tell the timer to stop, then wait for it to stop.  This
+		 * avoids problems with race conditions removing the timer
+		 * here.
+		 */
+		atomic_inc(&stop_operation);
+		del_timer_sync(&ipmi_timer);
 
-	/* Check for buffer leaks. */
-	count = atomic_read(&smi_msg_inuse_count);
-	if (count != 0)
-		pr_warn("SMI message count %d at exit\n", count);
-	count = atomic_read(&recv_msg_inuse_count);
-	if (count != 0)
-		pr_warn("recv message count %d at exit\n", count);
+		initialized = false;
+
+		/* Check for buffer leaks. */
+		count = atomic_read(&smi_msg_inuse_count);
+		if (count != 0)
+			pr_warn("SMI message count %d at exit\n", count);
+		count = atomic_read(&recv_msg_inuse_count);
+		if (count != 0)
+			pr_warn("recv message count %d at exit\n", count);
+
+		cleanup_srcu_struct(&ipmi_interfaces_srcu);
+	}
+	if (drvregistered)
+		driver_unregister(&ipmidriver.driver);
 }
 module_exit(cleanup_ipmi);
@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 
 		/* Remove the multi-part read marker. */
 		len -= 2;
+		data += 2;
 		for (i = 0; i < len; i++)
-			ssif_info->data[i] = data[i+2];
+			ssif_info->data[i] = data[i];
 		ssif_info->multi_len = len;
 		ssif_info->multi_pos = 1;
 
@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		}
 
 		blocknum = data[0];
+		len--;
+		data++;
+
+		if (blocknum != 0xff && len != 31) {
+			/* All blocks but the last must have 31 data bytes. */
+			result = -EIO;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Received middle message <31\n");
 
-		if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+			goto continue_op;
+		}
+
+		if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
 			/* Received message too big, abort the operation. */
 			result = -E2BIG;
 			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			goto continue_op;
 		}
 
-		/* Remove the blocknum from the data. */
-		len--;
 		for (i = 0; i < len; i++)
-			ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+			ssif_info->data[i + ssif_info->multi_len] = data[i];
 		ssif_info->multi_len += len;
 		if (blocknum == 0xff) {
 			/* End of read */
 			len = ssif_info->multi_len;
 			data = ssif_info->data;
-		} else if (blocknum + 1 != ssif_info->multi_pos) {
+		} else if (blocknum != ssif_info->multi_pos) {
 			/*
 			 * Out of sequence block, just abort.  Block
 			 * numbers start at zero for the second block,
@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		}
 	}
 
+ continue_op:
 	if (result < 0) {
 		ssif_inc_stat(ssif_info, receive_errors);
 	} else {
@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		ssif_inc_stat(ssif_info, received_message_parts);
 	}
 
-
- continue_op:
 	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
 		pr_info("DONE 1: state = %d, result=%d\n",
 			ssif_info->ssif_state, result);
@ -59,6 +59,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/serial_8250.h>
+#include <linux/nospec.h>
 #include "smapi.h"
 #include "mwavedd.h"
 #include "3780i.h"
@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 							ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
 				" ipcnum %x entry usIntCount %x\n",
@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 						" Invalid ipcnum %x\n", ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
 				" ipcnum %x, usIntCount %x\n",
@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 							ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			mutex_lock(&mwave_mutex);
 			if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
 				pDrvData->IPCs[ipcnum].bIsEnabled = false;
@ -293,7 +293,6 @@ config COMMON_CLK_BD718XX
 source "drivers/clk/actions/Kconfig"
 source "drivers/clk/bcm/Kconfig"
 source "drivers/clk/hisilicon/Kconfig"
-source "drivers/clk/imx/Kconfig"
 source "drivers/clk/imgtec/Kconfig"
 source "drivers/clk/imx/Kconfig"
 source "drivers/clk/ingenic/Kconfig"
@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
 
 		if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
 			src = VC5_PRIM_SRC_SHDN_EN_XTAL;
-		if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
+		else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
 			src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+		else /* Invalid; should have been caught by vc5_probe() */
+			return -EINVAL;
 	}
 
 	return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
@ -2779,7 +2779,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
 	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
 	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
 	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
-	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+	seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
 	seq_printf(s, "\"duty_cycle\": %u",
 		   clk_core_get_scaled_duty_cycle(c, 100000));
 }
@ -169,6 +169,8 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
 	base = devm_ioremap(dev, res->start, resource_size(res));
 	if (!base)
 		return -ENOMEM;
@ -215,6 +215,7 @@ config MSM_MMCC_8996
 
 config MSM_GCC_8998
 	tristate "MSM8998 Global Clock Controller"
+	select QCOM_GDSC
 	help
 	  Support for the global clock controller on msm8998 devices.
 	  Say Y if you want to use peripheral devices such as UART, SPI,
@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
 	/* Read mdiv and fdiv from the fdbck register */
 	reg = readl(socfpgaclk->hw.reg + 0x4);
 	mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
-	vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
+	vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
 
 	return (unsigned long)vco_freq;
 }
@ -12,17 +12,17 @@
 
 #include "stratix10-clk.h"
 
-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
-					"f2s_free_clk",};
+static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
+					"f2s-free-clk",};
 static const char * const cntr_mux[] = { "main_pll", "periph_pll",
-					 "osc1", "cb_intosc_hs_div2_clk",
-					 "f2s_free_clk"};
-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
+					 "osc1", "cb-intosc-hs-div2-clk",
+					 "f2s-free-clk"};
+static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
 
 static const char * const noc_free_mux[] = {"main_noc_base_clk",
 					    "peri_noc_base_clk",
-					    "osc1", "cb_intosc_hs_div2_clk",
-					    "f2s_free_clk"};
+					    "osc1", "cb-intosc-hs-div2-clk",
+					    "f2s-free-clk"};
 
 static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
 static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
 static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
 static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
 
-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
+static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
 static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
 static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
 
 static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
 					    "peri_mpu_base_clk",
-					    "osc1", "cb_intosc_hs_div2_clk",
-					    "f2s_free_clk"};
+					    "osc1", "cb-intosc-hs-div2-clk",
+					    "f2s-free-clk"};
 
 /* clocks in AO (always on) controller */
 static const struct stratix10_pll_clock s10_pll_clks[] = {
@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
 	struct tegra_dfll_soc_data *soc;
 
 	soc = tegra_dfll_unregister(pdev);
-	if (IS_ERR(soc))
+	if (IS_ERR(soc)) {
 		dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
 			PTR_ERR(soc));
+		return PTR_ERR(soc);
+	}
 
 	tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
 
@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
 	if (ret)
 		return ret;
 
-	zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) *
-			      clock_max_idx, GFP_KERNEL);
+	zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
+			      GFP_KERNEL);
 	if (!zynqmp_data)
 		return -ENOMEM;
 
@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 	if (device->is_local)
 		return -ENODEV;
 
-	if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
-		WARN_ON(dma_set_max_seg_size(device->card->device,
-					     SBP2_MAX_SEG_SIZE));
-
 	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
 	if (shost == NULL)
 		return -ENOMEM;
@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = {
 	.eh_abort_handler	= sbp2_scsi_abort,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
+	.max_segment_size	= SBP2_MAX_SEG_SIZE,
 	.can_queue		= 1,
 	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
 };
@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
 
@ -32,6 +32,7 @@
 #include "vega10_pptable.h"
 
 #define NUM_DSPCLK_LEVELS 8
+#define VEGA10_ENGINECLOCK_HARDMAX 198000
 
 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
 		enum phm_platform_caps cap)
@ -258,7 +259,26 @@ static int init_over_drive_limits(
 		struct pp_hwmgr *hwmgr,
 		const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
 {
-	hwmgr->platform_descriptor.overdriveLimit.engineClock =
+	const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
+			(const ATOM_Vega10_GFXCLK_Dependency_Table *)
+			(((unsigned long) powerplay_table) +
+			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
+	bool is_acg_enabled = false;
+	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
+
+	if (gfxclk_dep_table->ucRevId == 1) {
+		patom_record_v2 =
+			(ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+		is_acg_enabled =
+			(bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
+	}
+
+	if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
+		!is_acg_enabled)
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
+			VEGA10_ENGINECLOCK_HARDMAX;
+	else
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
 			le32_to_cpu(powerplay_table->ulMaxODEngineClock);
 	hwmgr->platform_descriptor.overdriveLimit.memoryClock =
 			le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+	wa_ctx->indirect_ctx.obj = NULL;
+	wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
 static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@ -911,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	list_del_init(&workload->list);
 
-	if (!workload->status) {
-		release_shadow_batch_buffer(workload);
-		release_shadow_wa_ctx(&workload->wa_ctx);
-	}
-
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU
 		 * has occurred GPU hang or something wrong with i915/GVT,
@ -1283,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+	release_shadow_batch_buffer(workload);
+	release_shadow_wa_ctx(&workload->wa_ctx);
+
 	if (workload->shadow_mm)
 		intel_vgpu_mm_put(workload->shadow_mm);
 
@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		 */
 		if (!(prio & I915_PRIORITY_NEWCLIENT)) {
 			prio |= I915_PRIORITY_NEWCLIENT;
+			active->sched.attr.priority = prio;
 			list_move_tail(&active->sched.link,
 				       i915_sched_lookup_priolist(engine, prio));
 		}
@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
+			GEM_BUG_ON(last &&
+				   need_preempt(engine, last, rq_prio(rq)));
+
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not
@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
 	np = dev_pm_opp_get_of_node(opp);
 
 	if (np) {
-		of_property_read_u32(np, "qcom,level", &val);
+		of_property_read_u32(np, "opp-level", &val);
 		of_node_put(np);
 	}
 
@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	adreno_gpu->rev = config->rev;
 
 	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
-	adreno_gpu_config.irqname = "kgsl_3d0_irq";
 
 	adreno_gpu_config.va_start = SZ_16M;
 	adreno_gpu_config.va_end = 0xffffffff;
@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
 			&pdpu->pipe_qos_cfg);
 }
 
-static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
-{
-	struct dpu_plane *pdpu = to_dpu_plane(plane);
-	struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
-
-	if (!pdpu->is_rt_pipe)
-		return;
-
-	pm_runtime_get_sync(&dpu_kms->pdev->dev);
-	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
-	pm_runtime_put_sync(&dpu_kms->pdev->dev);
-}
-
 /**
  * _dpu_plane_set_ot_limit - set OT limit for the given plane
  * @plane: Pointer to drm plane
@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane)
 }
 
 #ifdef CONFIG_DEBUG_FS
+static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+	if (!pdpu->is_rt_pipe)
+		return;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
 static ssize_t _dpu_plane_danger_read(struct file *file,
 			char __user *buff, size_t count, loff_t *ppos)
 {
@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+		struct msm_gem_vma *vma, int prot,
+		struct sg_table *sgt, int npages);
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);
 
@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
 
+__printf(2, 3)
 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
 
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 int msm_debugfs_late_init(struct drm_device *dev);
 int msm_rd_debugfs_init(struct drm_minor *minor);
 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...);
 int msm_perf_debugfs_init(struct drm_minor *minor);
 void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
 #else
 static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
 static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	struct page **pages;
+	int prot = IOMMU_READ;
+
+	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+		prot |= IOMMU_WRITE;
 
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-			obj->size >> PAGE_SHIFT);
+	return msm_gem_map_vma(aspace, vma, prot,
+			msm_obj->sgt, obj->size >> PAGE_SHIFT);
 }
 
 /* get iova and pin it. Should have a matching put */
@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 
 int
 msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+		struct msm_gem_vma *vma, int prot,
+		struct sg_table *sgt, int npages)
 {
 	unsigned size = npages << PAGE_SHIFT;
 	int ret = 0;
@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 
 	if (aspace->mmu)
 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-				size, IOMMU_READ | IOMMU_WRITE);
+				size, prot);
 
 	if (ret)
 		vma->mapped = false;
 
@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}
 
 	/* Get Interrupt: */
-	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
+	gpu->irq = platform_get_irq(pdev, 0);
 	if (gpu->irq < 0) {
 		ret = gpu->irq;
 		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
 
@ -31,7 +31,6 @@ struct msm_gpu_state;
 
 struct msm_gpu_config {
 	const char *ioname;
-	const char *irqname;
 	uint64_t va_start;
 	uint64_t va_end;
 	unsigned int nr_rings;
@ -63,7 +62,7 @@ struct msm_gpu_funcs {
 	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
 	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 	/* show GPU status in debugfs: */
 	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
 			struct drm_printer *p);
 
@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
 		char *fptr = &fifo->buf[fifo->head];
 		int n;
 
-		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+		if (!rd->open)
+			return;
 
 		/* Note that smp_load_acquire() is not strictly required
 		 * as CIRC_SPACE_TO_END() does not access the tail more
@ -213,7 +215,10 @@ out:
 static int rd_release(struct inode *inode, struct file *file)
 {
 	struct msm_rd_state *rd = inode->i_private;
+
 	rd->open = false;
+	wake_up_all(&rd->fifo_event);
+
 	return 0;
 }
 
@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
 	val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
 	val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
 	writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+
+	clk_disable_unprepare(hdmi->tmds_clk);
 }
 
 static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)
 
 	DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
 
+	clk_prepare_enable(hdmi->tmds_clk);
+
 	sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
 	val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
 	val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
 
@ -125,6 +125,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
 {
 	struct hid_collection *collection;
 	unsigned usage;
+	int collection_index;
 
 	usage = parser->local.usage[0];
 
@ -167,13 +168,13 @@ static int open_collection(struct hid_parser *parser, unsigned type)
 	parser->collection_stack[parser->collection_stack_ptr++] =
 		parser->device->maxcollection;
 
-	collection = parser->device->collection +
-		parser->device->maxcollection++;
+	collection_index = parser->device->maxcollection++;
+	collection = parser->device->collection + collection_index;
 	collection->type = type;
 	collection->usage = usage;
 	collection->level = parser->collection_stack_ptr - 1;
-	collection->parent = parser->active_collection;
-	parser->active_collection = collection;
+	collection->parent_idx = (collection->level == 0) ? -1 :
+		parser->collection_stack[collection->level - 1];
 
 	if (type == HID_COLLECTION_APPLICATION)
 		parser->device->maxapplication++;
@ -192,8 +193,6 @@ static int close_collection(struct hid_parser *parser)
 		return -EINVAL;
 	}
 	parser->collection_stack_ptr--;
-	if (parser->active_collection)
-		parser->active_collection = parser->active_collection->parent;
 	return 0;
 }
 
@ -1006,10 +1005,12 @@ static void hid_apply_multiplier_to_field(struct hid_device *hid,
 		usage = &field->usage[i];
 
 		collection = &hid->collection[usage->collection_index];
-		while (collection && collection != multiplier_collection)
-			collection = collection->parent;
+		while (collection->parent_idx != -1 &&
+		       collection != multiplier_collection)
+			collection = &hid->collection[collection->parent_idx];
 
-		if (collection || multiplier_collection == NULL)
+		if (collection->parent_idx != -1 ||
+		    multiplier_collection == NULL)
 			usage->resolution_multiplier = effective_multiplier;
 
 	}
@ -1044,9 +1045,9 @@ static void hid_apply_multiplier(struct hid_device *hid,
 	 * applicable fields later.
 	 */
 	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
-	while (multiplier_collection &&
+	while (multiplier_collection->parent_idx != -1 &&
 	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
-		multiplier_collection = multiplier_collection->parent;
+		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
 
 	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
 
@ -461,6 +461,9 @@
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
 
+#define I2C_VENDOR_ID_GOODIX		0x27c6
+#define I2C_DEVICE_ID_GOODIX_01F0	0x01f0
+
 #define USB_VENDOR_ID_GOODTOUCH		0x1aad
 #define USB_DEVICE_ID_GOODTOUCH_000f	0x000f
 
@ -179,6 +179,8 @@ static const struct i2c_hid_quirks {
 		I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
 	{ USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
 		I2C_HID_QUIRK_NO_RUNTIME_PM },
+	{ I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
+		I2C_HID_QUIRK_NO_RUNTIME_PM },
 	{ 0, 0 }
 };
 
@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 int vmbus_disconnect_ring(struct vmbus_channel *channel)
 {
 	struct vmbus_channel *cur_channel, *tmp;
-	unsigned long flags;
-	LIST_HEAD(list);
 	int ret;
 
 	if (channel->primary_channel != NULL)
 		return -EINVAL;
 
-	/* Snapshot the list of subchannels */
-	spin_lock_irqsave(&channel->lock, flags);
-	list_splice_init(&channel->sc_list, &list);
-	spin_unlock_irqrestore(&channel->lock, flags);
-
-	list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+	list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
 		if (cur_channel->rescind)
 			wait_for_completion(&cur_channel->rescind_event);
 
@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
 			pfn_cnt -= pgs_ol;
 			/*
 			 * Check if the corresponding memory block is already
-			 * online by checking its last previously backed page.
-			 * In case it is we need to bring rest (which was not
-			 * backed previously) online too.
+			 * online. It is possible to observe struct pages still
+			 * being uninitialized here so check section instead.
+			 * In case the section is online we need to bring the
+			 * rest of pfns (which were not backed previously)
+			 * online too.
 			 */
 			if (start_pfn > has->start_pfn &&
-			    !PageReserved(pfn_to_page(start_pfn - 1)))
+			    online_section_nr(pfn_to_section_nr(start_pfn)))
 				hv_bring_pgs_online(has, start_pfn, pgs_ol);

 		}

@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
 }

 /* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-				 struct hv_ring_buffer_debug_info *debug_info)
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				struct hv_ring_buffer_debug_info *debug_info)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;

-	if (ring_info->ring_buffer) {
-		hv_get_ringbuffer_availbytes(ring_info,
-					     &bytes_avail_toread,
-					     &bytes_avail_towrite);
-
-		debug_info->bytes_avail_toread = bytes_avail_toread;
-		debug_info->bytes_avail_towrite = bytes_avail_towrite;
-		debug_info->current_read_index =
-			ring_info->ring_buffer->read_index;
-		debug_info->current_write_index =
-			ring_info->ring_buffer->write_index;
-		debug_info->current_interrupt_mask =
-			ring_info->ring_buffer->interrupt_mask;
-	}
+	if (!ring_info->ring_buffer)
+		return -EINVAL;
+
+	hv_get_ringbuffer_availbytes(ring_info,
+				     &bytes_avail_toread,
+				     &bytes_avail_towrite);
+	debug_info->bytes_avail_toread = bytes_avail_toread;
+	debug_info->bytes_avail_towrite = bytes_avail_towrite;
+	debug_info->current_read_index = ring_info->ring_buffer->read_index;
+	debug_info->current_write_index = ring_info->ring_buffer->write_index;
+	debug_info->current_interrupt_mask
+		= ring_info->ring_buffer->interrupt_mask;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

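The void-to-int conversion above pushes the "ring buffer not initialized" case back to callers, and the ten sysfs hunks that follow adopt the same check-then-use pattern. A minimal sketch of that convention, with illustrative stand-in types rather than the Hyper-V ones:

    #include <errno.h>
    #include <stdio.h>

    struct ring { int ready; unsigned int read_index; };

    /* Return 0 on success, -EINVAL when the ring is not set up,
     * mirroring the converted helper (types here are stand-ins). */
    static int get_debuginfo(const struct ring *r, unsigned int *read_index)
    {
    	if (!r->ready)
    		return -EINVAL;
    	*read_index = r->read_index;
    	return 0;
    }

    int main(void)
    {
    	struct ring r = { .ready = 0 };
    	unsigned int idx;
    	int ret = get_debuginfo(&r, &idx);

    	if (ret < 0)
    		printf("ret=%d: uninitialized ring rejected\n", ret);
    	return 0;
    }
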
@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(out_intr_mask);

@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.current_read_index);
 }
 static DEVICE_ATTR_RO(out_read_index);

@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.current_write_index);
 }
 static DEVICE_ATTR_RO(out_write_index);

@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(out_read_bytes_avail);

@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(out_write_bytes_avail);

@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;

 	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(in_intr_mask);

@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;

 	return sprintf(buf, "%d\n", inbound.current_read_index);
 }
 static DEVICE_ATTR_RO(in_read_index);

@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;

 	return sprintf(buf, "%d\n", inbound.current_write_index);
 }
 static DEVICE_ATTR_RO(in_write_index);

@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;

 	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(in_read_bytes_avail);

@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;

 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;

 	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(in_write_bytes_avail);

@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
 		drive->proc = proc_mkdir(drive->name, parent);
 		if (drive->proc) {
 			ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
-			proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
+			proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
 					 drive->proc, &ide_settings_proc_fops,
 					 drive);
 		}

@@ -252,6 +252,8 @@ static const struct xpad_device {
 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },

@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOXONE_VENDOR(0x0e6f),		/* 0x0e6f X-Box One controllers */
 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
+	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */

@@ -39,6 +39,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
+#include <linux/overflow.h>
 #include <linux/input/mt.h>
 #include "../input-compat.h"

@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
 static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 				   const struct input_absinfo *abs)
 {
-	int min, max;
+	int min, max, range;

 	min = abs->minimum;
 	max = abs->maximum;

@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 		return -EINVAL;
 	}

-	if (abs->flat > max - min) {
+	if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
 		printk(KERN_DEBUG
 		       "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
 		       UINPUT_NAME, code, abs->flat, min, max);

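The new guard depends on check_sub_overflow() from the just-added <linux/overflow.h> include: with int endpoints, max - min can overflow (for example max = INT_MAX, min = -1), so the old comparison could be made against a wrapped value. A standalone sketch of the same idea using the compiler builtin the kernel helper is built on (the wrapper name here is illustrative):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for the kernel's check_sub_overflow(). */
    static bool sub_overflows(int a, int b, int *res)
    {
    	return __builtin_sub_overflow(a, b, res); /* true on overflow */
    }

    int main(void)
    {
    	int range;
    	int max = INT_MAX, min = -1, flat = 10;

    	if (!sub_overflows(max, min, &range) && flat > range)
    		printf("flat %d out of range %d\n", flat, range);
    	else
    		printf("overflowing range rejected, flat check skipped\n");
    	return 0;
    }
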
@@ -195,6 +195,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;

+	priv->dev = &pdev->dev;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(priv->base)) {

@@ -248,7 +250,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 		goto err_irq;
 	}

-	priv->dev = &pdev->dev;
 	device_init_wakeup(priv->dev, 1);
 	platform_set_drvdata(pdev, priv);

@@ -698,7 +698,7 @@ config TOUCHSCREEN_EDT_FT5X06

 config TOUCHSCREEN_RASPBERRYPI_FW
 	tristate "Raspberry Pi's firmware base touch screen support"
-	depends on RASPBERRYPI_FIRMWARE || COMPILE_TEST
+	depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
 	help
 	  Say Y here if you have the official Raspberry Pi 7 inch screen on
 	  your system.

@@ -224,7 +224,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 	 * If we have reason to believe the IOMMU driver missed the initial
 	 * probe for dev, replay it to get things in order.
 	 */
-	if (dev->bus && !device_iommu_mapped(dev))
+	if (!err && dev->bus && !device_iommu_mapped(dev))
 		err = iommu_probe_device(dev);

 	/* Ignore all other errors apart from EPROBE_DEFER */

@@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
 	 * capi:cipher_api_spec-iv:ivopts
 	 */
 	tmp = &cipher_in[strlen("capi:")];
-	cipher_api = strsep(&tmp, "-");
-	*ivmode = strsep(&tmp, ":");
-	*ivopts = tmp;
+
+	/* Separate IV options if present, it can contain another '-' in hash name */
+	*ivopts = strrchr(tmp, ':');
+	if (*ivopts) {
+		**ivopts = '\0';
+		(*ivopts)++;
+	}
+
+	/* Parse IV mode */
+	*ivmode = strrchr(tmp, '-');
+	if (*ivmode) {
+		**ivmode = '\0';
+		(*ivmode)++;
+	}
+
+	/* The rest is crypto API spec */
+	cipher_api = tmp;

 	if (*ivmode && !strcmp(*ivmode, "lmk"))
 		cc->tfms_count = 64;

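The move from strsep() to strrchr() matters because a capi: cipher specification can itself contain dashes; for a spec such as the hypothetical `capi:xchacha20,aes-adiantum-plain64`, splitting on the first '-' would truncate the algorithm name, while scanning from the right peels off ivopts and ivmode without touching it. A self-contained sketch of the same right-to-left split (the input string is an example, not taken from the patch):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	/* example spec after the "capi:" prefix has been skipped */
    	char spec[] = "xchacha20,aes-adiantum-plain64";
    	char *ivopts, *ivmode, *cipher_api;

    	/* IV options, if any, sit after the rightmost ':' */
    	ivopts = strrchr(spec, ':');
    	if (ivopts)
    		*ivopts++ = '\0';

    	/* IV mode follows the rightmost '-', so dashes inside the
    	 * algorithm name are left alone */
    	ivmode = strrchr(spec, '-');
    	if (ivmode)
    		*ivmode++ = '\0';

    	cipher_api = spec;	/* what remains is the crypto API spec */
    	printf("api=%s ivmode=%s ivopts=%s\n", cipher_api,
    	       ivmode ? ivmode : "(none)", ivopts ? ivopts : "(none)");
    	return 0;
    }
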
@@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
 		goto bad_mem;

 	chainmode = strsep(&tmp, "-");
-	*ivopts = strsep(&tmp, "-");
-	*ivmode = strsep(&*ivopts, ":");
-
-	if (tmp)
-		DMWARN("Ignoring unexpected additional cipher options");
+	*ivmode = strsep(&tmp, ":");
+	*ivopts = tmp;

 	/*
 	 * For compatibility with the original dm-crypt mapping format, if

@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
 		return r;
 }

-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
 	int r;
 	uint32_t ref_count;

@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
 	down_read(&pmd->root_lock);
 	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
 	if (!r)
-		*result = (ref_count != 0);
+		*result = (ref_count > 1);
 	up_read(&pmd->root_lock);

 	return r;

@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,

 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);

-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);

 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);

@@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	 * passdown we have to check that these blocks are now unused.
 	 */
 	int r = 0;
-	bool used = true;
+	bool shared = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;

@@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
-			r = dm_pool_block_is_used(pool->pmd, b, &used);
+			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
 			if (r)
 				goto out;

-			if (!used)
+			if (!shared)
 				break;
 		}

@@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m

 		/* find end of run */
 		for (e = b + 1; e != end; e++) {
-			r = dm_pool_block_is_used(pool->pmd, e, &used);
+			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
 			if (r)
 				goto out;

-			if (used)
+			if (shared)
 				break;
 		}

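The rename above is a semantic fix rather than a cosmetic one: dm_sm_get_count() yields the block's reference count, and a count of exactly 1 means the block is mapped by only one virtual device, so its discard should still be passed down; only counts above 1 denote snapshot sharing. A tiny sketch of the corrected predicate:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A block is "shared" only when more than one virtual device maps
     * it; refcount 1 is merely "used" and must remain discardable. */
    static bool block_is_shared(uint32_t ref_count)
    {
    	return ref_count > 1;
    }

    int main(void)
    {
    	printf("ref=0 shared=%d (free)\n", block_is_shared(0));
    	printf("ref=1 shared=%d (used, discard passes down)\n",
    	       block_is_shared(1));
    	printf("ref=2 shared=%d (snapshot-shared, skip)\n",
    	       block_is_shared(2));
    	return 0;
    }
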
@@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,

 	__bio_clone_fast(clone, bio);

-	if (unlikely(bio_integrity(bio) != NULL)) {
+	if (bio_integrity(bio)) {
 		int r;

 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&

@@ -1336,11 +1336,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 			return r;
 	}

-	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
-	clone->bi_iter.bi_size = to_bytes(len);
-
-	if (unlikely(bio_integrity(bio) != NULL))
-		bio_integrity_trim(clone);
+	bio_trim(clone, sector - clone->bi_iter.bi_sector, len);

 	return 0;
 }

@@ -1588,6 +1584,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
 	ci->sector = bio->bi_iter.bi_sector;
 }

+#define __dm_part_stat_sub(part, field, subnd) \
+	(part_stat_get(part, field) -= (subnd))
+
 /*
  * Entry point to split a bio into clones and submit them to the targets.
  */

@@ -1642,7 +1641,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 			struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
 						  GFP_NOIO, &md->queue->bio_split);
 			ci.io->orig_bio = b;
+
+			/*
+			 * Adjust IO stats for each split, otherwise upon queue
+			 * reentry there will be redundant IO accounting.
+			 * NOTE: this is a stop-gap fix, a proper fix involves
+			 * significant refactoring of DM core's bio splitting
+			 * (by eliminating DM's splitting and just using bio_split)
+			 */
+			part_stat_lock();
+			__dm_part_stat_sub(&dm_disk(md)->part0,
+					   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+			part_stat_unlock();
+
 			bio_chain(b, bio);
+			trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
 			ret = generic_make_request(bio);
 			break;
 		}

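The arithmetic behind the stop-gap: the whole bio is accounted when it first enters the queue, and the split-off remainder is accounted again on re-entry, so the in-flight sector statistics must be reduced by the remainder before the chain is submitted. A toy model of the double count (plain counters, not the kernel's part_stat machinery):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long stat = 0;
    	unsigned int total = 8, processed = 5;
    	unsigned int remainder = total - processed;

    	stat += total;		/* accounted on first queue entry */
    	stat -= remainder;	/* the fix: subtract the unprocessed tail */
    	stat += remainder;	/* tail re-enters the queue, re-accounted */

    	printf("sectors accounted: %lu (expected %u)\n", stat, total);
    	return 0;
    }
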
@@ -1713,6 +1726,15 @@ out:
 	return ret;
 }

+static blk_qc_t dm_process_bio(struct mapped_device *md,
+			       struct dm_table *map, struct bio *bio)
+{
+	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+		return __process_bio(md, map, bio);
+	else
+		return __split_and_process_bio(md, map, bio);
+}
+
 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;

@@ -1733,10 +1755,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 		return ret;
 	}

-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-		ret = __process_bio(md, map, bio);
-	else
-		ret = __split_and_process_bio(md, map, bio);
+	ret = dm_process_bio(md, map, bio);

 	dm_put_live_table(md, srcu_idx);
 	return ret;

@@ -2415,9 +2434,9 @@ static void dm_wq_work(struct work_struct *work)
 			break;

 		if (dm_request_based(md))
-			generic_make_request(c);
+			(void) generic_make_request(c);
 		else
-			__split_and_process_bio(md, map, c);
+			(void) dm_process_bio(md, map, c);
 	}

 	dm_put_live_table(md, srcu_idx);

@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
  *
  * Return:
  *	0 - Success
+ *	Non-zero - Failure
  */
 static int ibmvmc_open(struct inode *inode, struct file *file)
 {
 	struct ibmvmc_file_session *session;
-	int rc = 0;

 	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
 		 (unsigned long)inode, (unsigned long)file,
 		 ibmvmc.state);

 	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return -ENOMEM;
+
 	session->file = file;
 	file->private_data = session;

-	return rc;
+	return 0;
 }

 /**

@@ -1187,9 +1187,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 		dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;

 		if (dma_setup_res->status) {
-			dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
-				 dma_setup_res->status,
-				 mei_hbm_status_str(dma_setup_res->status));
+			u8 status = dma_setup_res->status;
+
+			if (status == MEI_HBMS_NOT_ALLOWED) {
+				dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
+			} else {
+				dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
+					 status,
+					 mei_hbm_status_str(status));
+			}
 			dev->hbm_f_dr_supported = 0;
 			mei_dmam_ring_free(dev);
 		}

@@ -127,6 +127,8 @@
 #define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
 #define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */

+#define MEI_DEV_ID_DNV_IE     0x19E5  /* Denverton IE */
+
 #define MEI_DEV_ID_GLK        0x319A  /* Gemini Lake */

 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */

@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},

 	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

+	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

 	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},

@@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context)
 	struct resource r;

 	if (acpi_dev_resource_io(res, &r)) {
+#ifdef CONFIG_HAS_IOPORT_MAP
 		base = ioport_map(r.start, resource_size(&r));
 		return AE_OK;
+#else
+		return AE_ERROR;
+#endif
 	} else if (acpi_dev_resource_memory(res, &r)) {
 		base = ioremap(r.start, resource_size(&r));
 		return AE_OK;

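ioport_map() only exists on architectures that provide port I/O (CONFIG_HAS_IOPORT_MAP); guarding the call keeps the driver buildable elsewhere while reporting the resource as unusable. A compile-time sketch of the same pattern, with an illustrative macro name rather than the kernel's:

    #include <stdio.h>

    /* #define HAS_IOPORT_MAP 1	-- define on platforms with port I/O */

    static int map_io_resource(unsigned long start)
    {
    #ifdef HAS_IOPORT_MAP
    	printf("mapping port I/O at 0x%lx\n", start);
    	return 0;
    #else
    	(void)start;
    	return -1;	/* analogous to returning AE_ERROR above */
    #endif
    }

    int main(void)
    {
    	printf("map result: %d\n", map_io_resource(0x505));
    	return 0;
    }
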
@@ -116,7 +116,7 @@ config MMC_RICOH_MMC

 config MMC_SDHCI_ACPI
 	tristate "SDHCI support for ACPI enumerated SDHCI controllers"
-	depends on MMC_SDHCI && ACPI
+	depends on MMC_SDHCI && ACPI && PCI
 	select IOSF_MBI if X86
 	help
 	  This selects support for ACPI enumerated SDHCI controllers,

@@ -978,7 +978,7 @@ config MMC_SDHCI_OMAP
 	tristate "TI SDHCI Controller Support"
 	depends on MMC_SDHCI_PLTFM && OF
 	select THERMAL
-	select TI_SOC_THERMAL
+	imply TI_SOC_THERMAL
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
 	  support present in TI's DRA7 SOCs. The controller supports

@@ -1,11 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2018 Mellanox Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */

 #include <linux/bitfield.h>

@@ -179,6 +179,8 @@ struct meson_host {
 	struct sd_emmc_desc *descs;
 	dma_addr_t descs_dma_addr;

+	int irq;
+
 	bool vqmmc_enabled;
 };

@@ -738,6 +740,11 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct meson_host *host = mmc_priv(mmc);
+	int adj = 0;
+
+	/* enable signal resampling w/o delay */
+	adj = ADJUST_ADJ_EN;
+	writel(adj, host->regs + host->data->adjust);

 	return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }

@@ -768,6 +775,9 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		if (!IS_ERR(mmc->supply.vmmc))
 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

+		/* disable signal resampling */
+		writel(0, host->regs + host->data->adjust);
+
 		/* Reset rx phase */
 		clk_set_phase(host->rx_clk, 0);

@@ -1166,7 +1176,7 @@ static int meson_mmc_get_cd(struct mmc_host *mmc)

 static void meson_mmc_cfg_init(struct meson_host *host)
 {
-	u32 cfg = 0, adj = 0;
+	u32 cfg = 0;

 	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
 			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));

@@ -1177,10 +1187,6 @@ static void meson_mmc_cfg_init(struct meson_host *host)
 	cfg |= CFG_ERR_ABORT;

 	writel(cfg, host->regs + SD_EMMC_CFG);
-
-	/* enable signal resampling w/o delay */
-	adj = ADJUST_ADJ_EN;
-	writel(adj, host->regs + host->data->adjust);
 }

 static int meson_mmc_card_busy(struct mmc_host *mmc)

@@ -1231,7 +1237,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct meson_host *host;
 	struct mmc_host *mmc;
-	int ret, irq;
+	int ret;

 	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
 	if (!mmc)

@@ -1276,8 +1282,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
 		goto free_host;
 	}

-	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
+	host->irq = platform_get_irq(pdev, 0);
+	if (host->irq <= 0) {
 		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
 		ret = -EINVAL;
 		goto free_host;

@@ -1331,9 +1337,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
 	writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
 	       host->regs + SD_EMMC_IRQ_EN);

-	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
-					meson_mmc_irq_thread, IRQF_SHARED,
-					NULL, host);
+	ret = request_threaded_irq(host->irq, meson_mmc_irq,
+			meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
 	if (ret)
 		goto err_init_clk;

@@ -1351,7 +1356,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
 	if (host->bounce_buf == NULL) {
 		dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
 		ret = -ENOMEM;
-		goto err_init_clk;
+		goto err_free_irq;
 	}

 	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,

@@ -1370,6 +1375,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
 err_bounce_buf:
 	dma_free_coherent(host->dev, host->bounce_buf_size,
 			  host->bounce_buf, host->bounce_dma_addr);
+err_free_irq:
+	free_irq(host->irq, host);
 err_init_clk:
 	clk_disable_unprepare(host->mmc_clk);
 err_core_clk:

@@ -1387,6 +1394,7 @@ static int meson_mmc_remove(struct platform_device *pdev)

 	/* disable interrupts */
 	writel(0, host->regs + SD_EMMC_IRQ_EN);
+	free_irq(host->irq, host);

 	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
 			  host->descs, host->descs_dma_addr);

@@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)

 	iproc_host->data = iproc_data;

-	mmc_of_parse(host->mmc);
+	ret = mmc_of_parse(host->mmc);
+	if (ret)
+		goto err;
+
 	sdhci_get_property(pdev);

 	host->mmc->caps |= iproc_host->data->mmc_caps;

@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
 struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
 {
 	struct can_priv *priv = netdev_priv(dev);
-	struct sk_buff *skb = priv->echo_skb[idx];
-	struct canfd_frame *cf;

 	if (idx >= priv->echo_skb_max) {
 		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",

@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
 		return NULL;
 	}

-	if (!skb) {
-		netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
-			   __func__, idx);
-		return NULL;
+	if (priv->echo_skb[idx]) {
+		/* Using "struct canfd_frame::len" for the frame
+		 * length is supported on both CAN and CANFD frames.
+		 */
+		struct sk_buff *skb = priv->echo_skb[idx];
+		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+		u8 len = cf->len;
+
+		*len_ptr = len;
+		priv->echo_skb[idx] = NULL;
+
+		return skb;
 	}

-	/* Using "struct canfd_frame::len" for the frame
-	 * length is supported on both CAN and CANFD frames.
-	 */
-	cf = (struct canfd_frame *)skb->data;
-	*len_ptr = cf->len;
-	priv->echo_skb[idx] = NULL;
-
-	return skb;
+	return NULL;
 }

 /*

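The restructuring above fixes an ordering bug rather than style: the old code loaded priv->echo_skb[idx] into a local before idx was validated, an out-of-bounds read whenever a driver passed a bad index. The rule, indexing only after the bounds check, in a standalone sketch (hypothetical types, not the CAN ones):

    #include <stdio.h>

    #define ECHO_MAX 4
    static const char *echo_skb[ECHO_MAX];

    static const char *get_echo(unsigned int idx)
    {
    	/* validate idx before the array is ever dereferenced */
    	if (idx >= ECHO_MAX) {
    		fprintf(stderr, "index %u out of bounds\n", idx);
    		return NULL;
    	}
    	return echo_skb[idx];
    }

    int main(void)
    {
    	const char *skb;

    	echo_skb[1] = "frame";
    	skb = get_echo(1);
    	printf("%s\n", skb ? skb : "(null)");
    	get_echo(10);	/* rejected without touching the array */
    	return 0;
    }
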
@@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev)
 		}
 	} else {
 		/* clear and invalidate unused mailboxes first */
-		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) {
+		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
 			mb = flexcan_get_mb(priv, i);
 			priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
 				    &mb->can_ctrl);

@@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
 	gpr_np = of_find_node_by_phandle(phandle);
 	if (!gpr_np) {
 		dev_dbg(&pdev->dev, "could not find gpr node by phandle\n");
-		return PTR_ERR(gpr_np);
+		return -ENODEV;
 	}

 	priv = netdev_priv(dev);

@@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)

 		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
 				     priv->phy_iface);
-		if (IS_ERR(phydev))
+		if (IS_ERR(phydev)) {
 			netdev_err(dev, "Could not attach to PHY\n");
+			phydev = NULL;
+		}

 	} else {
 		int ret;

@@ -9,8 +9,9 @@ config FSL_DPAA2_ETH

 config FSL_DPAA2_PTP_CLOCK
 	tristate "Freescale DPAA2 PTP Clock"
-	depends on FSL_DPAA2_ETH && POSIX_TIMERS
-	select PTP_1588_CLOCK
+	depends on FSL_DPAA2_ETH
+	imply PTP_1588_CLOCK
+	default y
 	help
 	  This driver adds support for using the DPAA2 1588 timer module
 	  as a PTP clock.

@@ -3467,7 +3467,7 @@ fec_probe(struct platform_device *pdev)
 	if (ret)
 		goto failed_clk_ipg;

-	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
 	if (!IS_ERR(fep->reg_phy)) {
 		ret = regulator_enable(fep->reg_phy);
 		if (ret) {