commit 1b10d388d0
Merge branch 'linus' into sched/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -100,6 +100,10 @@ modules.order
 /include/ksym/
 /arch/*/include/generated/
 
+# Generated lkdtm tests
+/tools/testing/selftests/lkdtm/*.sh
+!/tools/testing/selftests/lkdtm/run.sh
+
 # stgit generated dirs
 patches-*

CREDITS
@@ -567,6 +567,11 @@ D: Original author of Amiga FFS filesystem
 S: Orlando, Florida
 S: USA
 
+N: Paul Burton
+E: paulburton@kernel.org
+W: https://pburton.com
+D: MIPS maintainer 2018-2020
+
 N: Lennert Buytenhek
 E: kernel@wantstofly.org
 D: Original (2.4) rewrite of the ethernet bridging code

@@ -62,6 +62,30 @@ Or more shorter, written as following::
 In both styles, same key words are automatically merged when parsing it
 at boot time. So you can append similar trees or key-values.
 
+Same-key Values
+---------------
+
+It is prohibited that two or more values or arrays share a same-key.
+For example,::
+
+ foo = bar, baz
+ foo = qux  # !ERROR! we can not re-define same key
+
+If you want to append the value to existing key as an array member,
+you can use ``+=`` operator. For example::
+
+ foo = bar, baz
+ foo += qux
+
+In this case, the key ``foo`` has ``bar``, ``baz`` and ``qux``.
+
+However, a sub-key and a value can not co-exist under a parent key.
+For example, following config is NOT allowed.::
+
+ foo = value1
+ foo.bar = value2 # !ERROR! subkey "bar" and value "value1" can NOT co-exist
+
+
 Comments
 --------
 
@@ -102,9 +126,13 @@ Boot Kernel With a Boot Config
 ==============================
 
 Since the boot configuration file is loaded with initrd, it will be added
-to the end of the initrd (initramfs) image file. The Linux kernel decodes
-the last part of the initrd image in memory to get the boot configuration
-data.
+to the end of the initrd (initramfs) image file with size, checksum and
+12-byte magic word as below.
+
+[initrd][bootconfig][size(u32)][checksum(u32)][#BOOTCONFIG\n]
+
+The Linux kernel decodes the last part of the initrd image in memory to
+get the boot configuration data.
 Because of this "piggyback" method, there is no need to change or
 update the boot loader and the kernel image itself.
 
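(Editorial aside, not part of the commit: the piggyback layout above can be illustrated with a small helper. The sketch below is not the kernel's tools/bootconfig utility; it assumes the size and checksum fields are little-endian u32 values and that the checksum is a plain byte-wise sum of the config data — verify both against tools/bootconfig before relying on it.)

    /* Sketch only: appends a boot config per the layout documented above. */
    #include <stdint.h>
    #include <stdio.h>

    static int append_bootconfig(const char *initrd_path, const char *bconf_path)
    {
            FILE *in = fopen(bconf_path, "rb");     /* boot config data */
            FILE *out = fopen(initrd_path, "ab");   /* append to initrd */
            uint32_t size = 0, csum = 0;
            int c;

            if (!in || !out)
                    return -1;
            while ((c = fgetc(in)) != EOF) {        /* copy data, summing bytes */
                    fputc(c, out);
                    size++;
                    csum += (uint8_t)c;             /* assumed: additive checksum */
            }
            fwrite(&size, sizeof(size), 1, out);    /* assumes a little-endian host */
            fwrite(&csum, sizeof(csum), 1, out);
            fwrite("#BOOTCONFIG\n", 1, 12, out);    /* the 12-byte magic word */
            fclose(in);
            fclose(out);
            return 0;
    }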

@@ -136,6 +136,10 @@
                        dynamic table installation which will install SSDT
                        tables to /sys/firmware/acpi/tables/dynamic.
 
+       acpi_no_watchdog        [HW,ACPI,WDT]
+                       Ignore the ACPI-based watchdog interface (WDAT) and let
+                       a native driver control the watchdog device instead.
+
        acpi_rsdp=      [ACPI,EFI,KEXEC]
                        Pass the RSDP address to the kernel, mostly used
                        on machines running EFI runtime service to boot the
@@ -551,6 +551,7 @@ options to your ``.config``:
 Once the kernel is built and installed, a simple
 
 .. code-block:: bash
 
        modprobe example-test
 
 ...will run the tests.

@@ -765,7 +765,7 @@ is not sufficient this sometimes needs to be explicit.
        Example::
 
                #arch/x86/boot/Makefile
-               subdir- := compressed/
+               subdir- := compressed
 
        The above assignment instructs kbuild to descend down in the
        directory compressed/ when "make clean" is executed.
@@ -1379,9 +1379,6 @@ See subsequent chapter for the syntax of the Kbuild file.
        in arch/$(ARCH)/include/(uapi/)/asm, Kbuild will automatically generate
        a wrapper of the asm-generic one.
 
-       The convention is to list one subdir per line and
-       preferably in alphabetic order.
-
 8 Kbuild Variables
 ==================
 

@@ -487,8 +487,9 @@ phy_register_fixup_for_id()::
 The stubs set one of the two matching criteria, and set the other one to
 match anything.
 
-When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module,
-unregister fixup and free allocate memory are required.
+When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module load
+time, the module needs to unregister the fixup and free allocated memory when
+it's unloaded.
 
 Call one of following function before unloading module::
 

@@ -13,7 +13,6 @@ Power Management
     drivers-testing
     energy-model
     freezing-of-tasks
-    interface
     opp
     pci
     pm_qos_interface

@@ -30,4 +30,4 @@ if [ -n "$parallel" ] ; then
        parallel="-j$parallel"
 fi
 
-exec "$sphinx" "$parallel" "$@"
+exec "$sphinx" $parallel "$@"
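(Editorial aside, not part of the commit: with parallel empty, the quoted "$parallel" expands to an empty-string argument that sphinx-build then tries to interpret as an option, whereas the unquoted $parallel expands to nothing at all; when parallel is set it holds a single word such as -j4, so dropping the quotes is safe here.)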

@@ -4611,35 +4611,38 @@ unpins the VPA pages and releases all the device pages that are used to
 track the secure pages by hypervisor.
 
 4.122 KVM_S390_NORMAL_RESET
+---------------------------
 
-Capability: KVM_CAP_S390_VCPU_RESETS
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: KVM_CAP_S390_VCPU_RESETS
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
 
 This ioctl resets VCPU registers and control structures according to
 the cpu reset definition in the POP (Principles Of Operation).
 
 4.123 KVM_S390_INITIAL_RESET
+----------------------------
 
-Capability: none
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: none
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
 
 This ioctl resets VCPU registers and control structures according to
 the initial cpu reset definition in the POP. However, the cpu is not
 put into ESA mode. This reset is a superset of the normal reset.
 
 4.124 KVM_S390_CLEAR_RESET
+--------------------------
 
-Capability: KVM_CAP_S390_VCPU_RESETS
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: KVM_CAP_S390_VCPU_RESETS
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
 
 This ioctl resets VCPU registers and control structures according to
 the clear cpu reset definition in the POP. However, the cpu is not put

@@ -19,7 +19,6 @@ x86-specific Documentation
    tlb
    mtrr
    pat
-   intel_mpx
    intel-iommu
    intel_txt
    amd-memory-encryption

@@ -11115,14 +11115,12 @@ S: Maintained
 F: drivers/usb/image/microtek.*
 
 MIPS
-M: Ralf Baechle <ralf@linux-mips.org>
-M: Paul Burton <paulburton@kernel.org>
+M: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
 L: linux-mips@vger.kernel.org
 W: http://www.linux-mips.org/
-T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
 Q: http://patchwork.linux-mips.org/project/linux-mips/list/
-S: Supported
+S: Maintained
 F: Documentation/devicetree/bindings/mips/
 F: Documentation/mips/
 F: arch/mips/
@@ -12741,7 +12739,7 @@ M: Tom Joseph <tjoseph@cadence.com>
 L: linux-pci@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/bindings/pci/cdns,*.txt
-F: drivers/pci/controller/pcie-cadence*
+F: drivers/pci/controller/cadence/
 
 PCI DRIVER FOR FREESCALE LAYERSCAPE
 M: Minghuan Lian <minghuan.Lian@nxp.com>

Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -68,6 +68,7 @@ unexport GREP_OPTIONS
 #
 # If KBUILD_VERBOSE equals 0 then the above command will be hidden.
 # If KBUILD_VERBOSE equals 1 then the above command is displayed.
+# If KBUILD_VERBOSE equals 2 then give the reason why each target is rebuilt.
 #
 # To put more focus on warnings, be less verbose as default
 # Use 'make V=1' to see the full commands
@@ -1238,7 +1239,7 @@ ifneq ($(dtstree),)
 %.dtb: include/config/kernel.release scripts_dtc
        $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
 
-PHONY += dtbs dtbs_install dt_binding_check
+PHONY += dtbs dtbs_install dtbs_check
 dtbs dtbs_check: include/config/kernel.release scripts_dtc
        $(Q)$(MAKE) $(build)=$(dtstree)
 
@@ -1258,6 +1259,7 @@ PHONY += scripts_dtc
 scripts_dtc: scripts_basic
        $(Q)$(MAKE) $(build)=scripts/dtc
 
+PHONY += dt_binding_check
 dt_binding_check: scripts_dtc
        $(Q)$(MAKE) $(build)=Documentation/devicetree/bindings
 

@@ -392,9 +392,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
 
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
 #define KVM_BP_HARDEN_UNKNOWN          -1
 #define KVM_BP_HARDEN_WA_NEEDED                0
 #define KVM_BP_HARDEN_NOT_REQUIRED     1

@@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq)
        isb();
 }
 
-static inline void gic_write_dir(u32 irq)
+static __always_inline void gic_write_dir(u32 irq)
 {
        write_sysreg_s(irq, SYS_ICC_DIR_EL1);
        isb();

@@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
        return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
 {
        return test_bit(ICACHEF_VPIPT, &__icache_flags);
 }

@@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
 {
        if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
                return;

@@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
        return cpuid_feature_extract_signed_field_width(features, field, 4);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
 {
        return (u64)(features << (64 - width - field)) >> (64 - width);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field(u64 features, int field)
 {
        return cpuid_feature_extract_unsigned_field_width(features, field, 4);
@@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
        return val == 0x1;
 }
 
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
 {
        return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
@@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void)
                !cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
 {
        return IS_ENABLED(CONFIG_ARM64_SVE) &&
                cpus_have_const_cap(ARM64_SVE);
 }
 
-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
 {
        return IS_ENABLED(CONFIG_ARM64_CNP) &&
                cpus_have_const_cap(ARM64_HAS_CNP);

@@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 }
 
 #define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
        asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
@@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 }
 
 #define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
 {
        u32 val;
        asm volatile(ALTERNATIVE("ldr %w0, [%1]",

@@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
        return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
@@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
        vcpu->arch.vsesr_el2 = vsesr;
 }
 
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
 }
@@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
        *__vcpu_elr_el1(vcpu) = v;
 }
 
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
 }
 
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }
 
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);
@@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
 {
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
 {
        if (reg_num != 31)
@@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
        return mode != PSR_MODE_EL0t;
 }
 
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.fault.esr_el2;
 }
 
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
        u32 esr = kvm_vcpu_get_hsr(vcpu);
 
@@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
        return -1;
 }
 
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.fault.far_el2;
 }
 
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
@@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
|
|||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
|
||||
}
|
||||
|
||||
static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
|
||||
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
|
||||
kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
|
||||
|
@@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
 }
 
-static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }
 
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
        return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 }
@@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case FSC_SEA:
@@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
        }
 }
 
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
        u32 esr = kvm_vcpu_get_hsr(vcpu);
        return ESR_ELx_SYS64_ISS_RT(esr);
@@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
        return data;    /* Leave LE untouched */
 }
 
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
        if (vcpu_mode_is_32bit(vcpu))
                kvm_skip_instr32(vcpu, is_wide_instr);
@@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
-static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
        *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
        vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

@@ -626,38 +626,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-static inline void kvm_arm_vhe_guest_enter(void)
-{
-       local_daif_mask();
-
-       /*
-        * Having IRQs masked via PMR when entering the guest means the GIC
-        * will not signal the CPU of interrupts of lower priority, and the
-        * only way to get out will be via guest exceptions.
-        * Naturally, we want to avoid this.
-        *
-        * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
-        * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
-        */
-       pmr_sync();
-}
-
-static inline void kvm_arm_vhe_guest_exit(void)
-{
-       /*
-        * local_daif_restore() takes care to properly restore PSTATE.DAIF
-        * and the GIC PMR if the host is using IRQ priorities.
-        */
-       local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
-       /*
-        * When we exit from the guest we change a number of CPU configuration
-        * parameters, such as traps. Make sure these changes take effect
-        * before running the host or additional guests.
-        */
-       isb();
-}
-
 #define KVM_BP_HARDEN_UNKNOWN          -1
 #define KVM_BP_HARDEN_WA_NEEDED                0
 #define KVM_BP_HARDEN_NOT_REQUIRED     1

@@ -47,6 +47,13 @@
 #define read_sysreg_el2(r)     read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)  write_sysreg_elx(v, r, _EL2, _EL1)
 
+/*
+ * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
+ * static inline can allow the compiler to out-of-line this. KVM always wants
+ * the macro version as its always inlined.
+ */
+#define __kvm_swab32(x)        ___constant_swab32(x)
+
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);

@@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt,
                        __le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
 
-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
        asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
                                    "ror %0, %0, #1\n"
@@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
+/* This is only called on a VHE system */
 static inline void *kvm_get_hyp_vector(void)
 {
        struct bp_hardening_data *data = arm64_get_bp_hardening_data();

@@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void)
        return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
-static inline bool has_vhe(void)
+static __always_inline bool has_vhe(void)
 {
        if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
                return true;

@@ -625,7 +625,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 }
 
 /* Switch to the guest for VHE systems running in EL2 */
-int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
@@ -678,7 +678,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
        return exit_code;
 }
-NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
+NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
+
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+       int ret;
+
+       local_daif_mask();
+
+       /*
+        * Having IRQs masked via PMR when entering the guest means the GIC
+        * will not signal the CPU of interrupts of lower priority, and the
+        * only way to get out will be via guest exceptions.
+        * Naturally, we want to avoid this.
+        *
+        * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
+        * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
+        */
+       pmr_sync();
+
+       ret = __kvm_vcpu_run_vhe(vcpu);
+
+       /*
+        * local_daif_restore() takes care to properly restore PSTATE.DAIF
+        * and the GIC PMR if the host is using IRQ priorities.
+        */
+       local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+       /*
+        * When we exit from the guest we change a number of CPU configuration
+        * parameters, such as traps. Make sure these changes take effect
+        * before running the host or additional guests.
+        */
+       isb();
+
+       return ret;
+}
 
 /* Switch to the guest for legacy non-VHE systems */
 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)

@@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
                u32 data = vcpu_get_reg(vcpu, rd);
                if (__is_be(vcpu)) {
                        /* guest pre-swabbed data, undo this for writel() */
-                       data = swab32(data);
+                       data = __kvm_swab32(data);
                }
                writel_relaxed(data, addr);
        } else {
                u32 data = readl_relaxed(addr);
                if (__is_be(vcpu)) {
                        /* guest expects swabbed data */
-                       data = swab32(data);
+                       data = __kvm_swab32(data);
                }
                vcpu_set_reg(vcpu, rd, data);
        }

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <dt-bindings/clock/jz4740-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
 
 / {
        #address-cells = <1>;
@@ -45,14 +46,6 @@
                #clock-cells = <1>;
        };
 
-       watchdog: watchdog@10002000 {
-               compatible = "ingenic,jz4740-watchdog";
-               reg = <0x10002000 0x10>;
-
-               clocks = <&cgu JZ4740_CLK_RTC>;
-               clock-names = "rtc";
-       };
-
        tcu: timer@10002000 {
                compatible = "ingenic,jz4740-tcu", "simple-mfd";
                reg = <0x10002000 0x1000>;
@@ -73,6 +66,14 @@
 
                interrupt-parent = <&intc>;
                interrupts = <23 22 21>;
+
+               watchdog: watchdog@0 {
+                       compatible = "ingenic,jz4740-watchdog";
+                       reg = <0x0 0xc>;
+
+                       clocks = <&tcu TCU_CLK_WDT>;
+                       clock-names = "wdt";
+               };
        };
 
        rtc_dev: rtc@10003000 {

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <dt-bindings/clock/jz4780-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
 #include <dt-bindings/dma/jz4780-dma.h>
 
 / {
@@ -67,6 +68,14 @@
 
                interrupt-parent = <&intc>;
                interrupts = <27 26 25>;
+
+               watchdog: watchdog@0 {
+                       compatible = "ingenic,jz4780-watchdog";
+                       reg = <0x0 0xc>;
+
+                       clocks = <&tcu TCU_CLK_WDT>;
+                       clock-names = "wdt";
+               };
        };
 
        rtc_dev: rtc@10003000 {
@@ -348,14 +357,6 @@
                status = "disabled";
        };
 
-       watchdog: watchdog@10002000 {
-               compatible = "ingenic,jz4780-watchdog";
-               reg = <0x10002000 0x10>;
-
-               clocks = <&cgu JZ4780_CLK_RTCLK>;
-               clock-names = "rtc";
-       };
-
        nemc: nemc@13410000 {
                compatible = "ingenic,jz4780-nemc";
                reg = <0x13410000 0x10000>;

@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/ingenic,tcu.h>
 #include <dt-bindings/clock/x1000-cgu.h>
 #include <dt-bindings/dma/x1000-dma.h>
 
@@ -72,7 +73,7 @@
                        compatible = "ingenic,x1000-watchdog", "ingenic,jz4780-watchdog";
                        reg = <0x0 0x10>;
 
-                       clocks = <&cgu X1000_CLK_RTCLK>;
+                       clocks = <&tcu TCU_CLK_WDT>;
                        clock-names = "wdt";
                };
        };
@@ -158,7 +159,6 @@
        i2c0: i2c-controller@10050000 {
                compatible = "ingenic,x1000-i2c";
                reg = <0x10050000 0x1000>;
-
                #address-cells = <1>;
                #size-cells = <0>;
 
@@ -173,7 +173,6 @@
        i2c1: i2c-controller@10051000 {
                compatible = "ingenic,x1000-i2c";
                reg = <0x10051000 0x1000>;
-
                #address-cells = <1>;
                #size-cells = <0>;
 
@@ -188,7 +187,6 @@
        i2c2: i2c-controller@10052000 {
                compatible = "ingenic,x1000-i2c";
                reg = <0x10052000 0x1000>;
-
                #address-cells = <1>;
                #size-cells = <0>;
 

@@ -155,9 +155,11 @@
 * effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use
 * optimized memory barrier primitives."). Here we specify that the affected
 * sync instructions should be emitted twice.
+ * Note that this expression is evaluated by the assembler (not the compiler),
+ * and that the assembler evaluates '==' as 0 or -1, not 0 or 1.
 */
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-# define __SYNC_rpt(type)      (1 + (type == __SYNC_wmb))
+# define __SYNC_rpt(type)      (1 - (type == __SYNC_wmb))
 #else
 # define __SYNC_rpt(type)      1
 #endif
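(Editorial aside, not part of the commit: a quick worked check of the fix above. As the new comment says, the assembler evaluates a true comparison as -1, not 1, so for the wmb type the old expression computed 1 + (-1) = 0 and emitted no sync instruction at all, while the new expression computes 1 - (-1) = 2 and emits the intended doubled sync; for every other barrier type the comparison is 0 and both forms yield 1.)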

@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
 {
        list_del(&v->list);
        if (v->load_addr)
-               release_progmem(v);
+               release_progmem(v->load_addr);
        kfree(v);
 }
 
@@ -33,6 +33,7 @@ endif
 cflags-vdso := $(ccflags-vdso) \
        $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
        -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+       -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \
        -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
        $(call cc-option, -fno-asynchronous-unwind-tables) \
        $(call cc-option, -fno-stack-protector)
@@ -51,6 +52,8 @@ endif
 
 CFLAGS_REMOVE_vgettimeofday.o = -pg
 
+DISABLE_VDSO := n
+
 #
 # For the pre-R6 code in arch/mips/vdso/vdso.h for locating
 # the base address of VDSO, the linker will emit a R_MIPS_PC32
@@ -64,11 +67,24 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg
 ifndef CONFIG_CPU_MIPSR6
   ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
     $(warning MIPS VDSO requires binutils >= 2.25)
-    obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
-    ccflags-vdso += -DDISABLE_MIPS_VDSO
+    DISABLE_VDSO := y
   endif
 endif
 
+#
+# GCC (at least up to version 9.2) appears to emit function calls that make use
+# of the GOT when targeting microMIPS, which we can't use in the VDSO due to
+# the lack of relocations. As such, we disable the VDSO for microMIPS builds.
+#
+ifdef CONFIG_CPU_MICROMIPS
+  DISABLE_VDSO := y
+endif
+
+ifeq ($(DISABLE_VDSO),y)
+  obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
+  ccflags-vdso += -DDISABLE_MIPS_VDSO
+endif
+
 # VDSO linker flags.
 VDSO_LDFLAGS := \
        -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
@@ -81,12 +97,18 @@ GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
+# Check that we don't have PIC 'jalr t9' calls left
+quiet_cmd_vdso_mips_check = VDSOCHK $@
+      cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \
+                       then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \
+                             rm -f $@; /bin/false); fi
+
 #
 # Shared build commands.
 #
 
 quiet_cmd_vdsold_and_vdso_check = LD      $@
-      cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+      cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check); $(cmd_vdso_mips_check)
 
 quiet_cmd_vdsold = VDSO    $@
       cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
@@ -1,2 +1,4 @@
 Image
 Image.gz
+loader
+loader.lds

@@ -72,6 +72,16 @@
 #define EXC_LOAD_PAGE_FAULT    13
 #define EXC_STORE_PAGE_FAULT   15
 
+/* PMP configuration */
+#define PMP_R                  0x01
+#define PMP_W                  0x02
+#define PMP_X                  0x04
+#define PMP_A                  0x18
+#define PMP_A_TOR              0x08
+#define PMP_A_NA4              0x10
+#define PMP_A_NAPOT            0x18
+#define PMP_L                  0x80
+
 /* symbolic CSR names: */
 #define CSR_CYCLE              0xc00
 #define CSR_TIME               0xc01
@@ -100,6 +110,8 @@
 #define CSR_MCAUSE             0x342
 #define CSR_MTVAL              0x343
 #define CSR_MIP                        0x344
+#define CSR_PMPCFG0            0x3a0
+#define CSR_PMPADDR0           0x3b0
 #define CSR_MHARTID            0xf14
 
 #ifdef CONFIG_RISCV_M_MODE

@@ -58,6 +58,12 @@ _start_kernel:
        /* Reset all registers except ra, a0, a1 */
        call reset_regs
 
+       /* Setup a PMP to permit access to all of memory. */
+       li a0, -1
+       csrw CSR_PMPADDR0, a0
+       li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
+       csrw CSR_PMPCFG0, a0
+
        /*
         * The hartid in a0 is expected later on, and we have no firmware
         * to hand it to us.

@@ -156,6 +156,6 @@ void __init trap_init(void)
        csr_write(CSR_SCRATCH, 0);
        /* Set the exception vector address */
        csr_write(CSR_TVEC, &handle_exception);
-       /* Enable all interrupts */
-       csr_write(CSR_IE, -1);
+       /* Enable interrupts */
+       csr_write(CSR_IE, IE_SIE | IE_EIE);
 }

@@ -19,18 +19,20 @@ asmlinkage void __init kasan_early_init(void)
        for (i = 0; i < PTRS_PER_PTE; ++i)
                set_pte(kasan_early_shadow_pte + i,
                        mk_pte(virt_to_page(kasan_early_shadow_page),
-                       PAGE_KERNEL));
+                              PAGE_KERNEL));
 
        for (i = 0; i < PTRS_PER_PMD; ++i)
                set_pmd(kasan_early_shadow_pmd + i,
-                       pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
-                               __pgprot(_PAGE_TABLE)));
+                       pfn_pmd(PFN_DOWN
+                               (__pa((uintptr_t) kasan_early_shadow_pte)),
+                               __pgprot(_PAGE_TABLE)));
 
        for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
             i += PGDIR_SIZE, ++pgd)
                set_pgd(pgd,
-                       pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
-                               __pgprot(_PAGE_TABLE)));
+                       pfn_pgd(PFN_DOWN
+                               (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+                               __pgprot(_PAGE_TABLE)));
 
        /* init for swapper_pg_dir */
        pgd = pgd_offset_k(KASAN_SHADOW_START);
@@ -38,37 +40,43 @@ asmlinkage void __init kasan_early_init(void)
        for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
             i += PGDIR_SIZE, ++pgd)
                set_pgd(pgd,
-                       pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
-                               __pgprot(_PAGE_TABLE)));
+                       pfn_pgd(PFN_DOWN
+                               (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+                               __pgprot(_PAGE_TABLE)));
 
        flush_tlb_all();
 }
 
 static void __init populate(void *start, void *end)
 {
-       unsigned long i;
+       unsigned long i, offset;
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);
        unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+       unsigned long n_ptes =
+           ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
        unsigned long n_pmds =
-               (n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
-               n_pages / PTRS_PER_PTE;
+           ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
 
+       pte_t *pte =
+           memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+       pmd_t *pmd =
+           memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
        pgd_t *pgd = pgd_offset_k(vaddr);
-       pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
-       pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
 
        for (i = 0; i < n_pages; i++) {
                phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 
-               set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+               set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
        }
 
-       for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
-               set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+       for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
+               set_pmd(&pmd[i],
+                       pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
                        __pgprot(_PAGE_TABLE)));
 
-       for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
-               set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+       for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
+               set_pgd(&pgd[i],
+                       pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
                        __pgprot(_PAGE_TABLE)));
 
        flush_tlb_all();
@@ -81,7 +89,8 @@ void __init kasan_init(void)
        unsigned long i;
 
        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-               (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+                                   (void *)kasan_mem_to_shadow((void *)
+                                                               VMALLOC_END));
 
        for_each_memblock(memory, reg) {
                void *start = (void *)__va(reg->base);
@@ -90,14 +99,14 @@ void __init kasan_init(void)
                if (start >= end)
                        break;
 
-               populate(kasan_mem_to_shadow(start),
-                        kasan_mem_to_shadow(end));
+               populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
        };
 
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        mk_pte(virt_to_page(kasan_early_shadow_page),
-                       __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
+                               __pgprot(_PAGE_PRESENT | _PAGE_READ |
+                                        _PAGE_ACCESSED)));
 
        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
        init_task.kasan_depth = 0;

@@ -19,7 +19,14 @@ struct task_struct;
 void io_bitmap_share(struct task_struct *tsk);
 void io_bitmap_exit(void);
 
-void tss_update_io_bitmap(void);
+void native_tss_update_io_bitmap(void);
+
+#ifdef CONFIG_PARAVIRT_XXL
+#include <asm/paravirt.h>
+#else
+#define tss_update_io_bitmap native_tss_update_io_bitmap
+#endif
 
 #else
 static inline void io_bitmap_share(struct task_struct *tsk) { }
 static inline void io_bitmap_exit(void) { }
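(Editorial aside, not part of the commit: the #ifdef above gives callers a single tss_update_io_bitmap() spelling either way. Under CONFIG_PARAVIRT_XXL it resolves to the paravirt wrapper declared in asm/paravirt.h, which indirects through pv_ops so a hypervisor such as Xen PV can install its own handler; otherwise the #define aliases it directly to the native implementation with no indirection cost.)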

@@ -292,6 +292,14 @@ enum x86emul_mode {
 #define X86EMUL_SMM_MASK             (1 << 6)
 #define X86EMUL_SMM_INSIDE_NMI_MASK  (1 << 7)
 
+/*
+ * fastop functions are declared as taking a never-defined fastop parameter,
+ * so they can't be called from C directly.
+ */
+struct fastop;
+
+typedef void (*fastop_t)(struct fastop *);
+
 struct x86_emulate_ctxt {
        const struct x86_emulate_ops *ops;
 
@@ -324,7 +332,10 @@ struct x86_emulate_ctxt {
        struct operand src;
        struct operand src2;
        struct operand dst;
-       int (*execute)(struct x86_emulate_ctxt *ctxt);
+       union {
+               int (*execute)(struct x86_emulate_ctxt *ctxt);
+               fastop_t fop;
+       };
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
        /*
         * The following six fields are cleared together,
@ -1122,6 +1122,7 @@ struct kvm_x86_ops {
|
|||
int (*handle_exit)(struct kvm_vcpu *vcpu,
|
||||
enum exit_fastpath_completion exit_fastpath);
|
||||
int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
|
||||
void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
|
||||
void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
|
||||
u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
|
||||
void (*patch_hypercall)(struct kvm_vcpu *vcpu,
|
||||
|
@ -1146,7 +1147,7 @@ struct kvm_x86_ops {
|
|||
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
|
||||
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
|
||||
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
|
||||
void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
|
||||
int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
|
||||
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
|
||||
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
|
||||
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
|
||||
|
|

@@ -295,6 +295,13 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
        PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
 }
 
+#ifdef CONFIG_X86_IOPL_IOPERM
+static inline void tss_update_io_bitmap(void)
+{
+       PVOP_VCALL0(cpu.update_io_bitmap);
+}
+#endif
+
 static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
 {

@@ -140,6 +140,10 @@ struct pv_cpu_ops {
 
        void (*load_sp0)(unsigned long sp0);
 
+#ifdef CONFIG_X86_IOPL_IOPERM
+       void (*update_io_bitmap)(void);
+#endif
+
        void (*wbinvd)(void);
 
        /* cpuid emulation, mostly so that caps bits can be disabled */

@@ -72,7 +72,7 @@
 #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC     VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
 #define SECONDARY_EXEC_PT_USE_GPA              VMCS_CONTROL_BIT(PT_USE_GPA)
 #define SECONDARY_EXEC_TSC_SCALING             VMCS_CONTROL_BIT(TSC_SCALING)
-#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE   0x04000000
+#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE   VMCS_CONTROL_BIT(USR_WAIT_PAUSE)
 
 #define PIN_BASED_EXT_INTR_MASK                        VMCS_CONTROL_BIT(INTR_EXITING)
 #define PIN_BASED_NMI_EXITING                  VMCS_CONTROL_BIT(NMI_EXITING)

@@ -81,6 +81,7 @@
 #define VMX_FEATURE_MODE_BASED_EPT_EXEC        ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
 #define VMX_FEATURE_PT_USE_GPA         ( 2*32+ 24) /* "" Processor Trace logs GPAs */
 #define VMX_FEATURE_TSC_SCALING                ( 2*32+ 25) /* Scale hardware TSC when read in guest */
+#define VMX_FEATURE_USR_WAIT_PAUSE     ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */
 #define VMX_FEATURE_ENCLV_EXITING      ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */
 
 #endif /* _ASM_X86_VMXFEATURES_H */

@@ -390,6 +390,7 @@ struct kvm_sync_regs {
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
 #define KVM_STATE_NESTED_EVMCS         0x00000004
+#define KVM_STATE_NESTED_MTF_PENDING   0x00000008
 
 #define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON     0x00000002
@ -445,7 +445,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
|
|||
* cpuid bit to be set. We need to ensure that we
|
||||
* update that bit in this CPU's "cpu_info".
|
||||
*/
|
||||
get_cpu_cap(c);
|
||||
set_cpu_cap(c, X86_FEATURE_OSPKE);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
||||
|
|

@@ -425,7 +425,29 @@ static void __init sev_map_percpu_data(void)
        }
 }
 
+static bool pv_tlb_flush_supported(void)
+{
+       return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+               !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+               kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
 #ifdef CONFIG_SMP
+
+static bool pv_ipi_supported(void)
+{
+       return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+}
+
+static bool pv_sched_yield_supported(void)
+{
+       return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
+               !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+               kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
 #define KVM_IPI_CLUSTER_SIZE   (2 * BITS_PER_LONG)
 
 static void __send_ipi_mask(const struct cpumask *mask, int vector)
@@ -490,12 +512,12 @@ static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
        unsigned int this_cpu = smp_processor_id();
-       struct cpumask new_mask;
+       struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
        const struct cpumask *local_mask;
 
-       cpumask_copy(&new_mask, mask);
-       cpumask_clear_cpu(this_cpu, &new_mask);
-       local_mask = &new_mask;
+       cpumask_copy(new_mask, mask);
+       cpumask_clear_cpu(this_cpu, new_mask);
+       local_mask = new_mask;
        __send_ipi_mask(local_mask, vector);
 }
 
@@ -575,7 +597,6 @@ static void __init kvm_apf_trap_init(void)
        update_intr_gate(X86_TRAP_PF, async_page_fault);
 }
 
-static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
 
 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
                        const struct flush_tlb_info *info)
@@ -583,7 +604,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
        u8 state;
        int cpu;
        struct kvm_steal_time *src;
-       struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+       struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
        cpumask_copy(flushmask, cpumask);
        /*
@@ -619,11 +640,10 @@ static void __init kvm_guest_init(void)
                pv_ops.time.steal_clock = kvm_steal_clock;
        }
 
-       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-           !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-           kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+       if (pv_tlb_flush_supported()) {
                pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
                pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+               pr_info("KVM setup pv remote TLB flush\n");
        }
 
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -632,9 +652,7 @@ static void __init kvm_guest_init(void)
 #ifdef CONFIG_SMP
        smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-       if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
-           !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-           kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+       if (pv_sched_yield_supported()) {
                smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
                pr_info("KVM setup pv sched yield\n");
        }
@@ -700,7 +718,7 @@ static uint32_t __init kvm_detect(void)
 static void __init kvm_apic_init(void)
 {
 #if defined(CONFIG_SMP)
-       if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+       if (pv_ipi_supported())
                kvm_setup_pv_ipi();
 #endif
 }
@@ -732,26 +750,31 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
-static __init int kvm_setup_pv_tlb_flush(void)
+static __init int kvm_alloc_cpumask(void)
 {
        int cpu;
+       bool alloc = false;
 
        if (!kvm_para_available() || nopv)
                return 0;
 
-       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-           !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-           kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+       if (pv_tlb_flush_supported())
+               alloc = true;
+
+#if defined(CONFIG_SMP)
+       if (pv_ipi_supported())
+               alloc = true;
+#endif
+
+       if (alloc)
                for_each_possible_cpu(cpu) {
-                       zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+                       zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
                }
-               pr_info("KVM setup pv remote TLB flush\n");
-       }
 
        return 0;
 }
-arch_initcall(kvm_setup_pv_tlb_flush);
+arch_initcall(kvm_alloc_cpumask);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 

@@ -30,6 +30,7 @@
 #include <asm/timer.h>
 #include <asm/special_insns.h>
 #include <asm/tlb.h>
+#include <asm/io_bitmap.h>
 
 /*
 * nop stub, which must not clobber anything *including the stack* to
@@ -341,6 +342,10 @@ struct paravirt_patch_template pv_ops = {
        .cpu.iret               = native_iret,
        .cpu.swapgs             = native_swapgs,
 
+#ifdef CONFIG_X86_IOPL_IOPERM
+       .cpu.update_io_bitmap   = native_tss_update_io_bitmap,
+#endif
+
        .cpu.start_context_switch       = paravirt_nop,
        .cpu.end_context_switch         = paravirt_nop,

@@ -374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
 /**
 * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
 */
-void tss_update_io_bitmap(void)
+void native_tss_update_io_bitmap(void)
 {
        struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
        struct thread_struct *t = &current->thread;

@@ -59,6 +59,19 @@ config KVM
 
          If unsure, say N.
 
+config KVM_WERROR
+       bool "Compile KVM with -Werror"
+       # KASAN may cause the build to fail due to larger frames
+       default y if X86_64 && !KASAN
+       # We use the dependency on !COMPILE_TEST to not be enabled
+       # blindly in allmodconfig or allyesconfig configurations
+       depends on (X86_64 && !KASAN) || !COMPILE_TEST
+       depends on EXPERT
+       help
+         Add -Werror to the build flags for (and only for) i915.ko.
+
+         If in doubt, say "N".
+
 config KVM_INTEL
        tristate "KVM for Intel (and compatible) processors support"
        depends on KVM && IA32_FEAT_CTL

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 ccflags-y += -Iarch/x86/kvm
+ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 KVM := ../../../virt/kvm
 
@@ -191,25 +191,6 @@
 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
 #define FASTOP_SIZE 8
 
-/*
- * fastop functions have a special calling convention:
- *
- * dst:    rax (in/out)
- * src:    rdx (in/out)
- * src2:   rcx (in)
- * flags:  rflags (in/out)
- * ex:     rsi (in:fastop pointer, out:zero if exception)
- *
- * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
- * different operand sizes can be reached by calculation, rather than a jump
- * table (which would be bigger than the code).
- *
- * fastop functions are declared as taking a never-defined fastop parameter,
- * so they can't be called from C directly.
- */
-
-struct fastop;
-
 struct opcode {
        u64 flags : 56;
        u64 intercept : 8;
@@ -311,8 +292,19 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 #define ON64(x)
 #endif
 
-typedef void (*fastop_t)(struct fastop *);
-
+/*
+ * fastop functions have a special calling convention:
+ *
+ * dst:    rax (in/out)
+ * src:    rdx (in/out)
+ * src2:   rcx (in)
+ * flags:  rflags (in/out)
+ * ex:     rsi (in:fastop pointer, out:zero if exception)
+ *
+ * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
+ * different operand sizes can be reached by calculation, rather than a jump
+ * table (which would be bigger than the code).
+ */
 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 
 #define __FOP_FUNC(name) \
@@ -5683,7 +5675,7 @@ special_insn:
 
        if (ctxt->execute) {
                if (ctxt->d & Fastop)
-                       rc = fastop(ctxt, (fastop_t)ctxt->execute);
+                       rc = fastop(ctxt, ctxt->fop);
                else
                        rc = ctxt->execute(ctxt);
                if (rc != X86EMUL_CONTINUE)

@@ -417,7 +417,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
 
                kvm_set_msi_irq(vcpu->kvm, entry, &irq);
 
-               if (irq.level &&
+               if (irq.trig_mode &&
                    kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
                                irq.dest_id, irq.dest_mode))
                        __set_bit(irq.vector, ioapic_handled_vectors);

@@ -627,9 +627,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
 static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
 {
        u8 val;
-       if (pv_eoi_get_user(vcpu, &val) < 0)
+       if (pv_eoi_get_user(vcpu, &val) < 0) {
                printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
+               return false;
+       }
        return val & 0x1;
 }
 
@@ -1046,11 +1048,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                                apic->regs + APIC_TMR);
                }
 
-               if (vcpu->arch.apicv_active)
-                       kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
-               else {
+               if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
                        kvm_lapic_set_irr(vector, apic);
-
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }

@@ -339,7 +339,7 @@ TRACE_EVENT(
                /* These depend on page entry type, so compute them now. */
                __field(bool, r)
                __field(bool, x)
-               __field(u8, u)
+               __field(signed char, u)
        ),
 
        TP_fast_assign(
|
|||
MODULE_AUTHOR("Qumranet");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
#ifdef MODULE
|
||||
static const struct x86_cpu_id svm_cpu_id[] = {
|
||||
X86_FEATURE_MATCH(X86_FEATURE_SVM),
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
|
||||
#endif
|
||||
|
||||
#define IOPM_ALLOC_ORDER 2
|
||||
#define MSRPM_ALLOC_ORDER 1
|
||||
|
@@ -1005,33 +1007,32 @@ static void svm_cpu_uninit(int cpu)
 static int svm_cpu_init(int cpu)
 {
        struct svm_cpu_data *sd;
-       int r;
 
        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
-       r = -ENOMEM;
        sd->save_area = alloc_page(GFP_KERNEL);
        if (!sd->save_area)
-               goto err_1;
+               goto free_cpu_data;
 
        if (svm_sev_enabled()) {
-               r = -ENOMEM;
                sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
                                              sizeof(void *),
                                              GFP_KERNEL);
                if (!sd->sev_vmcbs)
-                       goto err_1;
+                       goto free_save_area;
        }
 
        per_cpu(svm_data, cpu) = sd;
 
        return 0;
 
-err_1:
+free_save_area:
        __free_page(sd->save_area);
+free_cpu_data:
        kfree(sd);
-       return r;
+       return -ENOMEM;
 
 }
@@ -1350,6 +1351,24 @@ static __init void svm_adjust_mmio_mask(void)
        kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
 }
 
+static void svm_hardware_teardown(void)
+{
+       int cpu;
+
+       if (svm_sev_enabled()) {
+               bitmap_free(sev_asid_bitmap);
+               bitmap_free(sev_reclaim_asid_bitmap);
+
+               sev_flush_asids();
+       }
+
+       for_each_possible_cpu(cpu)
+               svm_cpu_uninit(cpu);
+
+       __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
+       iopm_base = 0;
+}
+
 static __init int svm_hardware_setup(void)
 {
        int cpu;
@@ -1463,29 +1482,10 @@ static __init int svm_hardware_setup(void)
        return 0;
 
 err:
-       __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
-       iopm_base = 0;
+       svm_hardware_teardown();
        return r;
 }
 
-static __exit void svm_hardware_unsetup(void)
-{
-       int cpu;
-
-       if (svm_sev_enabled()) {
-               bitmap_free(sev_asid_bitmap);
-               bitmap_free(sev_reclaim_asid_bitmap);
-
-               sev_flush_asids();
-       }
-
-       for_each_possible_cpu(cpu)
-               svm_cpu_uninit(cpu);
-
-       __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
-       iopm_base = 0;
-}
-
 static void init_seg(struct vmcb_seg *seg)
 {
        seg->selector = 0;
@ -2196,8 +2196,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
|
|||
static int avic_init_vcpu(struct vcpu_svm *svm)
|
||||
{
|
||||
int ret;
|
||||
struct kvm_vcpu *vcpu = &svm->vcpu;
|
||||
|
||||
if (!kvm_vcpu_apicv_active(&svm->vcpu))
|
||||
if (!avic || !irqchip_in_kernel(vcpu->kvm))
|
||||
return 0;
|
||||
|
||||
ret = avic_init_backing_page(&svm->vcpu);
|
||||
|
@ -5232,6 +5233,9 @@ static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
|
|||
struct vmcb *vmcb = svm->vmcb;
|
||||
bool activated = kvm_vcpu_apicv_active(vcpu);
|
||||
|
||||
if (!avic)
|
||||
return;
|
||||
|
||||
if (activated) {
|
||||
/**
|
||||
* During AVIC temporary deactivation, guest could update
|
||||
|
@ -5255,8 +5259,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
|
|||
return;
|
||||
}
|
||||
|
||||
static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
|
||||
static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
|
||||
{
|
||||
if (!vcpu->arch.apicv_active)
|
||||
return -1;
|
||||
|
||||
kvm_lapic_set_irr(vec, vcpu->arch.apic);
|
||||
smp_mb__after_atomic();
|
||||
|
||||
|
@ -5268,6 +5275,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
|
|||
put_cpu();
|
||||
} else
|
||||
kvm_vcpu_wake_up(vcpu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
|
||||
|
@ -7378,7 +7387,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
|
|||
.cpu_has_kvm_support = has_svm,
|
||||
.disabled_by_bios = is_disabled,
|
||||
.hardware_setup = svm_hardware_setup,
|
||||
.hardware_unsetup = svm_hardware_unsetup,
|
||||
.hardware_unsetup = svm_hardware_teardown,
|
||||
.check_processor_compatibility = svm_check_processor_compat,
|
||||
.hardware_enable = svm_hardware_enable,
|
||||
.hardware_disable = svm_hardware_disable,
|
||||
|
@ -7433,6 +7442,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
|
|||
.run = svm_vcpu_run,
|
||||
.handle_exit = handle_exit,
|
||||
.skip_emulated_instruction = skip_emulated_instruction,
|
||||
.update_emulated_instruction = NULL,
|
||||
.set_interrupt_shadow = svm_set_interrupt_shadow,
|
||||
.get_interrupt_shadow = svm_get_interrupt_shadow,
|
||||
.patch_hypercall = svm_patch_hypercall,
|
||||
|
|
|
@@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept;
 extern bool __read_mostly enable_unrestricted_guest;
 extern bool __read_mostly enable_ept_ad_bits;
 extern bool __read_mostly enable_pml;
+extern bool __read_mostly enable_apicv;
 extern int __read_mostly pt_mode;

 #define PT_MODE_SYSTEM 0

@@ -3161,10 +3161,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
  * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
  *
  * Returns:
- *	NVMX_ENTRY_SUCCESS: Entered VMX non-root mode
- *	NVMX_ENTRY_VMFAIL:  Consistency check VMFail
- *	NVMX_ENTRY_VMEXIT:  Consistency check VMExit
- *	NVMX_ENTRY_KVM_INTERNAL_ERROR: KVM internal error
+ *	NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
+ *	NVMX_VMENTRY_VMFAIL:  Consistency check VMFail
+ *	NVMX_VMENTRY_VMEXIT:  Consistency check VMExit
+ *	NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
  */
 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 							bool from_vmentry)

@@ -3609,8 +3609,15 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 	unsigned long exit_qual;
 	bool block_nested_events =
 		vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
+	bool mtf_pending = vmx->nested.mtf_pending;
 	struct kvm_lapic *apic = vcpu->arch.apic;

+	/*
+	 * Clear the MTF state. If a higher priority VM-exit is delivered first,
+	 * this state is discarded.
+	 */
+	vmx->nested.mtf_pending = false;
+
 	if (lapic_in_kernel(vcpu) &&
 		test_bit(KVM_APIC_INIT, &apic->pending_events)) {
 		if (block_nested_events)

@@ -3621,8 +3628,28 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 		return 0;
 	}

+	/*
+	 * Process any exceptions that are not debug traps before MTF.
+	 */
 	if (vcpu->arch.exception.pending &&
+	    !vmx_pending_dbg_trap(vcpu) &&
 	    nested_vmx_check_exception(vcpu, &exit_qual)) {
 		if (block_nested_events)
 			return -EBUSY;
 		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
+		return 0;
+	}
+
+	if (mtf_pending) {
+		if (block_nested_events)
+			return -EBUSY;
+		nested_vmx_update_pending_dbg(vcpu);
+		nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
+		return 0;
+	}
+
+	if (vcpu->arch.exception.pending &&
+	    nested_vmx_check_exception(vcpu, &exit_qual)) {
+		if (block_nested_events)
+			return -EBUSY;
+		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);

@@ -5285,24 +5312,17 @@ fail:
 	return 1;
 }

-
-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
-				       struct vmcs12 *vmcs12)
+/*
+ * Return true if an IO instruction with the specified port and size should cause
+ * a VM-exit into L1.
+ */
+bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
+				 int size)
 {
-	unsigned long exit_qualification;
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	gpa_t bitmap, last_bitmap;
-	unsigned int port;
-	int size;
 	u8 b;

 	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
 		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
-	port = exit_qualification >> 16;
-	size = (exit_qualification & 7) + 1;
-
 	last_bitmap = (gpa_t)-1;
 	b = -1;

@@ -5329,8 +5349,26 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
 	return false;
 }

+static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+				       struct vmcs12 *vmcs12)
+{
+	unsigned long exit_qualification;
+	unsigned short port;
+	int size;
+
+	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+
+	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+	port = exit_qualification >> 16;
+	size = (exit_qualification & 7) + 1;
+
+	return nested_vmx_check_io_bitmaps(vcpu, port, size);
+}
+
 /*
- * Return 1 if we should exit from L2 to L1 to handle an MSR access access,
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
  * rather than handle it ourselves in L0. I.e., check whether L1 expressed
  * disinterest in the current event (read or write a specific MSR) by using an
  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.

@@ -5712,6 +5750,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,

 			if (vmx->nested.nested_run_pending)
 				kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
+
+			if (vmx->nested.mtf_pending)
+				kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
 		}
 	}

@@ -5892,6 +5933,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	vmx->nested.nested_run_pending =
 		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

+	vmx->nested.mtf_pending =
+		!!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
+
 	ret = -EINVAL;
 	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
 	    vmcs12->vmcs_link_pointer != -1ull) {

@@ -5949,8 +5993,7 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void)
  * bit in the high half is on if the corresponding bit in the control field
  * may be on. See also vmx_control_verify().
  */
-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
-				bool apicv)
+void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
 {
 	/*
 	 * Note that as a general rule, the high half of the MSRs (bits in

@@ -5977,7 +6020,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
 		PIN_BASED_EXT_INTR_MASK |
 		PIN_BASED_NMI_EXITING |
 		PIN_BASED_VIRTUAL_NMIS |
-		(apicv ? PIN_BASED_POSTED_INTR : 0);
+		(enable_apicv ? PIN_BASED_POSTED_INTR : 0);
 	msrs->pinbased_ctls_high |=
 		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
 		PIN_BASED_VMX_PREEMPTION_TIMER;

@@ -17,8 +17,7 @@ enum nvmx_vmentry_status {
 };

 void vmx_leave_nested(struct kvm_vcpu *vcpu);
-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
-				bool apicv);
+void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
 void nested_vmx_hardware_unsetup(void);
 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
 void nested_vmx_set_vmcs_shadowing_bitmap(void);

@@ -34,6 +33,8 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
 			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
+bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
+				 int size);

 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 {

@@ -175,6 +176,11 @@ static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
 }

+static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
+{
+	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
+}
+
 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
 {
 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);

@@ -64,11 +64,13 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");

+#ifdef MODULE
 static const struct x86_cpu_id vmx_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_VMX),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+#endif

 bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);

@@ -95,7 +97,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);

-static bool __read_mostly enable_apicv = 1;
+bool __read_mostly enable_apicv = 1;
 module_param(enable_apicv, bool, S_IRUGO);

 /*

@@ -1175,6 +1177,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 					   vmx->guest_msrs[i].mask);

 	}

+	if (vmx->nested.need_vmcs12_to_shadow_sync)
+		nested_sync_vmcs12_to_shadow(vcpu);
+
 	if (vmx->guest_state_loaded)
 		return;

@@ -1599,6 +1605,40 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	return 1;
 }

+/*
+ * Recognizes a pending MTF VM-exit and records the nested state for later
+ * delivery.
+ */
+static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!is_guest_mode(vcpu))
+		return;
+
+	/*
+	 * Per the SDM, MTF takes priority over debug-trap exceptions besides
+	 * T-bit traps. As instruction emulation is completed (i.e. at the
+	 * instruction boundary), any #DB exception pending delivery must be a
+	 * debug-trap. Record the pending MTF state to be delivered in
+	 * vmx_check_nested_events().
+	 */
+	if (nested_cpu_has_mtf(vmcs12) &&
+	    (!vcpu->arch.exception.pending ||
+	     vcpu->arch.exception.nr == DB_VECTOR))
+		vmx->nested.mtf_pending = true;
+	else
+		vmx->nested.mtf_pending = false;
+}
+
+static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+	vmx_update_emulated_instruction(vcpu);
+	return skip_emulated_instruction(vcpu);
+}
+
 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
 {
 	/*

@@ -3818,24 +3858,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 * 2. If target vcpu isn't running(root mode), kick it to pick up the
 * interrupt from PIR in next vmentry.
 */
-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int r;

 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
 	if (!r)
-		return;
+		return 0;
+
+	if (!vcpu->arch.apicv_active)
+		return -1;

 	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
-		return;
+		return 0;

 	/* If a previous notification has sent the IPI, nothing to do. */
 	if (pi_test_and_set_on(&vmx->pi_desc))
-		return;
+		return 0;

 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
 		kvm_vcpu_kick(vcpu);
+
+	return 0;
 }

 /*

@@ -6482,8 +6527,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmcs_write32(PLE_WINDOW, vmx->ple_window);
 	}

-	if (vmx->nested.need_vmcs12_to_shadow_sync)
-		nested_sync_vmcs12_to_shadow(vcpu);
+	/*
+	 * We did this in prepare_switch_to_guest, because it needs to
+	 * be within srcu_read_lock.
+	 */
+	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);

 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);

@@ -6757,8 +6805,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)

 	if (nested)
 		nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
-					   vmx_capability.ept,
-					   kvm_vcpu_apicv_active(vcpu));
+					   vmx_capability.ept);
 	else
 		memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));

@@ -6839,8 +6886,7 @@ static int __init vmx_check_processor_compat(void)
 	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
 		return -EIO;
 	if (nested)
-		nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
-					   enable_apicv);
+		nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
 	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
 		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
 				smp_processor_id());

@@ -7101,6 +7147,40 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
 	to_vmx(vcpu)->req_immediate_exit = true;
 }

+static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
+				  struct x86_instruction_info *info)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	unsigned short port;
+	bool intercept;
+	int size;
+
+	if (info->intercept == x86_intercept_in ||
+	    info->intercept == x86_intercept_ins) {
+		port = info->src_val;
+		size = info->dst_bytes;
+	} else {
+		port = info->dst_val;
+		size = info->src_bytes;
+	}
+
+	/*
+	 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
+	 * VM-exits depend on the 'unconditional IO exiting' VM-execution
+	 * control.
+	 *
+	 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
+	 */
+	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+		intercept = nested_cpu_has(vmcs12,
+					   CPU_BASED_UNCOND_IO_EXITING);
+	else
+		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
+
+	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
+}
+
 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage)

@@ -7108,19 +7188,45 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;

+	switch (info->intercept) {
 	/*
 	 * RDPID causes #UD if disabled through secondary execution controls.
 	 * Because it is marked as EmulateOnUD, we need to intercept it here.
 	 */
-	if (info->intercept == x86_intercept_rdtscp &&
-	    !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
-		ctxt->exception.vector = UD_VECTOR;
-		ctxt->exception.error_code_valid = false;
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	case x86_intercept_rdtscp:
+		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+			ctxt->exception.vector = UD_VECTOR;
+			ctxt->exception.error_code_valid = false;
+			return X86EMUL_PROPAGATE_FAULT;
+		}
+		break;
+
+	case x86_intercept_in:
+	case x86_intercept_ins:
+	case x86_intercept_out:
+	case x86_intercept_outs:
+		return vmx_check_intercept_io(vcpu, info);
+
+	case x86_intercept_lgdt:
+	case x86_intercept_lidt:
+	case x86_intercept_lldt:
+	case x86_intercept_ltr:
+	case x86_intercept_sgdt:
+	case x86_intercept_sidt:
+	case x86_intercept_sldt:
+	case x86_intercept_str:
+		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
+			return X86EMUL_CONTINUE;
+
+		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+		break;

 	/* TODO: check more intercepts... */
-	return X86EMUL_CONTINUE;
+	default:
+		break;
+	}

+	return X86EMUL_UNHANDLEABLE;
 }

@@ -7702,7 +7808,7 @@ static __init int hardware_setup(void)

 	if (nested) {
 		nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
-					   vmx_capability.ept, enable_apicv);
+					   vmx_capability.ept);

 		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
 		if (r)

@@ -7786,7 +7892,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {

 	.run = vmx_vcpu_run,
 	.handle_exit = vmx_handle_exit,
-	.skip_emulated_instruction = skip_emulated_instruction,
+	.skip_emulated_instruction = vmx_skip_emulated_instruction,
+	.update_emulated_instruction = vmx_update_emulated_instruction,
 	.set_interrupt_shadow = vmx_set_interrupt_shadow,
 	.get_interrupt_shadow = vmx_get_interrupt_shadow,
 	.patch_hypercall = vmx_patch_hypercall,

@@ -150,6 +150,9 @@ struct nested_vmx {
 	/* L2 must run next, and mustn't decide to exit to L1. */
 	bool nested_run_pending;

+	/* Pending MTF VM-exit into L1. */
+	bool mtf_pending;
+
 	struct loaded_vmcs vmcs02;

 	/*

@@ -6891,6 +6891,8 @@ restart:
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r && ctxt->tf)
 			r = kvm_vcpu_do_singlestep(vcpu);
+		if (kvm_x86_ops->update_emulated_instruction)
+			kvm_x86_ops->update_emulated_instruction(vcpu);
 		__kvm_set_rflags(vcpu, ctxt->eflags);
 	}

@@ -7188,15 +7190,15 @@ static void kvm_timer_init(void)

 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
-		struct cpufreq_policy policy;
+		struct cpufreq_policy *policy;
 		int cpu;

-		memset(&policy, 0, sizeof(policy));
 		cpu = get_cpu();
-		cpufreq_get_policy(&policy, cpu);
-		if (policy.cpuinfo.max_freq)
-			max_tsc_khz = policy.cpuinfo.max_freq;
+		policy = cpufreq_cpu_get(cpu);
+		if (policy && policy->cpuinfo.max_freq)
+			max_tsc_khz = policy->cpuinfo.max_freq;
 		put_cpu();
+		cpufreq_cpu_put(policy);
 #endif
 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
 					  CPUFREQ_TRANSITION_NOTIFIER);

@@ -7306,12 +7308,12 @@ int kvm_arch_init(void *opaque)
 	}

 	if (!ops->cpu_has_kvm_support()) {
-		printk(KERN_ERR "kvm: no hardware support\n");
+		pr_err_ratelimited("kvm: no hardware support\n");
 		r = -EOPNOTSUPP;
 		goto out;
 	}
 	if (ops->disabled_by_bios()) {
-		printk(KERN_ERR "kvm: disabled by bios\n");
+		pr_err_ratelimited("kvm: disabled by bios\n");
 		r = -EOPNOTSUPP;
 		goto out;
 	}

@@ -363,13 +363,8 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m,
 {
 	const struct ptdump_range ptdump_ranges[] = {
 #ifdef CONFIG_X86_64
-
-#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1))
-#define normalize_addr(u) ((signed long)((u) << normalize_addr_shift) >> \
-			   normalize_addr_shift)
-
 	{0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
-	{normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
+	{GUARD_HOLE_END_ADDR, ~0UL},
 #else
 	{0, ~0UL},
 #endif

@@ -180,7 +180,7 @@ void efi_sync_low_kernel_mappings(void)
 static inline phys_addr_t
 virt_to_phys_or_null_size(void *va, unsigned long size)
 {
-	bool bad_size;
+	phys_addr_t pa;

 	if (!va)
 		return 0;

@@ -188,16 +188,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
 	if (virt_addr_valid(va))
 		return virt_to_phys(va);

-	/*
-	 * A fully aligned variable on the stack is guaranteed not to
-	 * cross a page bounary. Try to catch strings on the stack by
-	 * checking that 'size' is a power of two.
-	 */
-	bad_size = size > PAGE_SIZE || !is_power_of_2(size);
+	pa = slow_virt_to_phys(va);

-	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
+	/* check if the object crosses a page boundary */
+	if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
+		return 0;

-	return slow_virt_to_phys(va);
+	return pa;
 }

 #define virt_to_phys_or_null(addr) \

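The replacement check above is compact: since `PAGE_MASK` clears the page-offset bits, XOR-ing the physical addresses of the first and last byte of the object is non-zero exactly when the two bytes sit in different pages. A standalone demonstration of the idiom (userspace sketch, assuming 4 KiB pages):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Returns 1 if [pa, pa + size) spans more than one page. */
static int crosses_page(uint64_t pa, uint64_t size)
{
	/* The XOR of the first and last byte address differs above the
	 * offset bits exactly when the bytes live in different pages. */
	return ((pa ^ (pa + size - 1)) & PAGE_MASK) != 0;
}

int main(void)
{
	printf("%d\n", crosses_page(0x1ff8, 8));  /* 0: fits within one page */
	printf("%d\n", crosses_page(0x1ffc, 8));  /* 1: straddles 0x2000 */
	return 0;
}
```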
@@ -568,85 +565,25 @@ efi_thunk_set_virtual_address_map(unsigned long memory_map_size,

 static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
-	efi_status_t status;
-	u32 phys_tm, phys_tc;
-	unsigned long flags;
-
-	spin_lock(&rtc_lock);
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_tm = virt_to_phys_or_null(tm);
-	phys_tc = virt_to_phys_or_null(tc);
-
-	status = efi_thunk(get_time, phys_tm, phys_tc);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-	spin_unlock(&rtc_lock);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }

 static efi_status_t efi_thunk_set_time(efi_time_t *tm)
 {
-	efi_status_t status;
-	u32 phys_tm;
-	unsigned long flags;
-
-	spin_lock(&rtc_lock);
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_tm = virt_to_phys_or_null(tm);
-
-	status = efi_thunk(set_time, phys_tm);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-	spin_unlock(&rtc_lock);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }

 static efi_status_t
 efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
 {
-	efi_status_t status;
-	u32 phys_enabled, phys_pending, phys_tm;
-	unsigned long flags;
-
-	spin_lock(&rtc_lock);
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_enabled = virt_to_phys_or_null(enabled);
-	phys_pending = virt_to_phys_or_null(pending);
-	phys_tm = virt_to_phys_or_null(tm);
-
-	status = efi_thunk(get_wakeup_time, phys_enabled,
-			   phys_pending, phys_tm);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-	spin_unlock(&rtc_lock);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }

 static efi_status_t
 efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
-	efi_status_t status;
-	u32 phys_tm;
-	unsigned long flags;
-
-	spin_lock(&rtc_lock);
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_tm = virt_to_phys_or_null(tm);
-
-	status = efi_thunk(set_wakeup_time, enabled, phys_tm);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-	spin_unlock(&rtc_lock);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }

 static unsigned long efi_name_size(efi_char16_t *name)

@@ -658,6 +595,8 @@ static efi_status_t
 efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
 {
+	u8 buf[24] __aligned(8);
+	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
 	efi_status_t status;
 	u32 phys_name, phys_vendor, phys_attr;
 	u32 phys_data_size, phys_data;

@@ -665,14 +604,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,

 	spin_lock_irqsave(&efi_runtime_lock, flags);

+	*vnd = *vendor;
+
 	phys_data_size = virt_to_phys_or_null(data_size);
-	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_vendor = virt_to_phys_or_null(vnd);
 	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
 	phys_attr = virt_to_phys_or_null(attr);
 	phys_data = virt_to_phys_or_null_size(data, *data_size);

-	status = efi_thunk(get_variable, phys_name, phys_vendor,
-			   phys_attr, phys_data_size, phys_data);
+	if (!phys_name || (data && !phys_data))
+		status = EFI_INVALID_PARAMETER;
+	else
+		status = efi_thunk(get_variable, phys_name, phys_vendor,
+				   phys_attr, phys_data_size, phys_data);

 	spin_unlock_irqrestore(&efi_runtime_lock, flags);

@@ -683,19 +627,25 @@ static efi_status_t
 efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
 {
+	u8 buf[24] __aligned(8);
+	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
 	u32 phys_name, phys_vendor, phys_data;
 	efi_status_t status;
 	unsigned long flags;

 	spin_lock_irqsave(&efi_runtime_lock, flags);

+	*vnd = *vendor;
+
 	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
-	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_vendor = virt_to_phys_or_null(vnd);
 	phys_data = virt_to_phys_or_null_size(data, data_size);

-	/* If data_size is > sizeof(u32) we've got problems */
-	status = efi_thunk(set_variable, phys_name, phys_vendor,
-			   attr, data_size, phys_data);
+	if (!phys_name || !phys_data)
+		status = EFI_INVALID_PARAMETER;
+	else
+		status = efi_thunk(set_variable, phys_name, phys_vendor,
+				   attr, data_size, phys_data);

 	spin_unlock_irqrestore(&efi_runtime_lock, flags);

@@ -707,6 +657,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
				   u32 attr, unsigned long data_size,
				   void *data)
 {
+	u8 buf[24] __aligned(8);
+	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
 	u32 phys_name, phys_vendor, phys_data;
 	efi_status_t status;
 	unsigned long flags;

@@ -714,13 +666,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
 	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
 		return EFI_NOT_READY;

+	*vnd = *vendor;
+
 	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
-	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_vendor = virt_to_phys_or_null(vnd);
 	phys_data = virt_to_phys_or_null_size(data, data_size);

-	/* If data_size is > sizeof(u32) we've got problems */
-	status = efi_thunk(set_variable, phys_name, phys_vendor,
-			   attr, data_size, phys_data);
+	if (!phys_name || !phys_data)
+		status = EFI_INVALID_PARAMETER;
+	else
+		status = efi_thunk(set_variable, phys_name, phys_vendor,
+				   attr, data_size, phys_data);

 	spin_unlock_irqrestore(&efi_runtime_lock, flags);

@@ -732,39 +688,36 @@ efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
 {
+	u8 buf[24] __aligned(8);
+	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
 	efi_status_t status;
 	u32 phys_name_size, phys_name, phys_vendor;
 	unsigned long flags;

 	spin_lock_irqsave(&efi_runtime_lock, flags);

+	*vnd = *vendor;
+
 	phys_name_size = virt_to_phys_or_null(name_size);
-	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_vendor = virt_to_phys_or_null(vnd);
 	phys_name = virt_to_phys_or_null_size(name, *name_size);

-	status = efi_thunk(get_next_variable, phys_name_size,
-			   phys_name, phys_vendor);
+	if (!phys_name)
+		status = EFI_INVALID_PARAMETER;
+	else
+		status = efi_thunk(get_next_variable, phys_name_size,
+				   phys_name, phys_vendor);

 	spin_unlock_irqrestore(&efi_runtime_lock, flags);

+	*vendor = *vnd;
 	return status;
 }

 static efi_status_t
 efi_thunk_get_next_high_mono_count(u32 *count)
 {
-	efi_status_t status;
-	u32 phys_count;
-	unsigned long flags;
-
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_count = virt_to_phys_or_null(count);
-	status = efi_thunk(get_next_high_mono_count, phys_count);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }

 static void

@@ -72,6 +72,9 @@
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/cpu.h>
+#ifdef CONFIG_X86_IOPL_IOPERM
+#include <asm/io_bitmap.h>
+#endif

 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>

@@ -837,6 +840,25 @@ static void xen_load_sp0(unsigned long sp0)
 	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }

+#ifdef CONFIG_X86_IOPL_IOPERM
+static void xen_update_io_bitmap(void)
+{
+	struct physdev_set_iobitmap iobitmap;
+	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+
+	native_tss_update_io_bitmap();
+
+	iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) +
+			  tss->x86_tss.io_bitmap_base;
+	if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID)
+		iobitmap.nr_ports = 0;
+	else
+		iobitmap.nr_ports = IO_BITMAP_BITS;
+
+	HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
+}
+#endif
+
 static void xen_io_delay(void)
 {
 }

@@ -1047,6 +1069,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.write_idt_entry = xen_write_idt_entry,
 	.load_sp0 = xen_load_sp0,

+#ifdef CONFIG_X86_IOPL_IOPERM
+	.update_io_bitmap = xen_update_io_bitmap,
+#endif
 	.io_delay = xen_io_delay,

 	/* Xen takes care of %gs when switching to usermode for us */

@@ -412,7 +412,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_request_bypass_insert(rq, false, false);
 		return;
 	}

@@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
 {
-	/* dispatch flush rq directly */
-	if (rq->rq_flags & RQF_FLUSH_SEQ) {
-		spin_lock(&hctx->lock);
-		list_add(&rq->queuelist, &hctx->dispatch);
-		spin_unlock(&hctx->lock);
+	/*
+	 * dispatch flush and passthrough rq directly
+	 *
+	 * passthrough request has to be added to hctx->dispatch directly.
+	 * For some reason, device may be in one situation which can't
+	 * handle FS request, so STS_RESOURCE is always returned and the
+	 * FS request will be added to hctx->dispatch. However passthrough
+	 * request may be required at that time for fixing the problem. If
+	 * passthrough request is added to scheduler queue, there isn't any
+	 * chance to dispatch it given we prioritize requests in hctx->dispatch.
+	 */
+	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
 		return true;
-	}

 	if (has_sched)
 		rq->rq_flags |= RQF_SORTED;

@@ -391,8 +397,10 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,

 	WARN_ON(e && (rq->tag != -1));

-	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
+	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+		blk_mq_request_bypass_insert(rq, at_head, false);
 		goto run;
+	}

 	if (e && e->type->ops.insert_requests) {
 		LIST_HEAD(list);

@@ -183,8 +183,8 @@ found_tag:
 	return tag + tag_offset;
 }

-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
-		    struct blk_mq_ctx *ctx, unsigned int tag)
+void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+		    unsigned int tag)
 {
 	if (!blk_mq_tag_is_reserved(tags, tag)) {
 		const int real_tag = tag - tags->nr_reserved_tags;

@@ -26,8 +26,8 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);

 extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
-			   struct blk_mq_ctx *ctx, unsigned int tag);
+extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+			   unsigned int tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_tags **tags,
				   unsigned int depth, bool can_grow);

@@ -477,9 +477,9 @@ static void __blk_mq_free_request(struct request *rq)
 	blk_pm_mark_last_busy(rq);
 	rq->mq_hctx = NULL;
 	if (rq->tag != -1)
-		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
+		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
-		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
+		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }

@@ -735,7 +735,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

@@ -1286,7 +1286,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			q->mq_ops->commit_rqs(hctx);

		spin_lock(&hctx->lock);
-		list_splice_init(list, &hctx->dispatch);
+		list_splice_tail_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*

@@ -1677,12 +1677,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+				  bool run_queue)
 {
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	spin_lock(&hctx->lock);
-	list_add_tail(&rq->queuelist, &hctx->dispatch);
+	if (at_head)
+		list_add(&rq->queuelist, &hctx->dispatch);
+	else
+		list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)

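The new `at_head` flag above selects `list_add()` (front of `hctx->dispatch`) instead of `list_add_tail()`, so urgent requests can jump the dispatch list. A self-contained sketch of the two insertions on a circular doubly linked list (a simplified stand-in for the kernel's `list_head` helpers, not the blk-mq code itself):

```c
#include <stdio.h>

struct node { struct node *prev, *next; int tag; };

/* Insert n right after head: front of the queue, like list_add(). */
static void add_head(struct node *n, struct node *head)
{
	n->next = head->next; n->prev = head;
	head->next->prev = n; head->next = n;
}

/* Insert n right before head: back of the queue, like list_add_tail(). */
static void add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev; n->next = head;
	head->prev->next = n; head->prev = n;
}

int main(void)
{
	struct node head = { &head, &head, -1 };
	struct node a = { 0, 0, 1 }, b = { 0, 0, 2 };

	add_tail(&a, &head);   /* ordinary request: queued at the back */
	add_head(&b, &head);   /* at_head request: queued at the front */

	for (struct node *p = head.next; p != &head; p = p->next)
		printf("tag %d\n", p->tag);   /* prints 2 then 1 */
	return 0;
}
```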
@@ -1849,7 +1853,7 @@ insert:
	if (bypass_insert)
		return BLK_STS_RESOURCE;

-	blk_mq_request_bypass_insert(rq, run_queue);
+	blk_mq_request_bypass_insert(rq, false, run_queue);
	return BLK_STS_OK;
 }

@@ -1876,7 +1880,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,

	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-		blk_mq_request_bypass_insert(rq, true);
+		blk_mq_request_bypass_insert(rq, false, true);
	else if (ret != BLK_STS_OK)
		blk_mq_end_request(rq, ret);

@@ -1910,7 +1914,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		if (ret != BLK_STS_OK) {
			if (ret == BLK_STS_RESOURCE ||
					ret == BLK_STS_DEV_RESOURCE) {
-				blk_mq_request_bypass_insert(rq,
+				blk_mq_request_bypass_insert(rq, false,
							list_empty(list));
				break;
			}

@@ -3398,7 +3402,6 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
 }

 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
-				       struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
 {
	unsigned long ret = 0;

@@ -3431,7 +3434,6 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 }

 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-				     struct blk_mq_hw_ctx *hctx,
				     struct request *rq)
 {
	struct hrtimer_sleeper hs;

@@ -3451,7 +3453,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
-		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
+		nsecs = blk_mq_poll_nsecs(q, rq);

	if (!nsecs)
		return false;

@@ -3506,7 +3508,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
		return false;
	}

-	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+	return blk_mq_poll_hybrid_sleep(q, rq);
 }

 /**

@@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 */
 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+				  bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

@@ -199,7 +200,7 @@ static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
 {
-	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
+	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {

|
@ -55,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
|
|||
}
|
||||
#endif
|
||||
|
||||
static bool acpi_no_watchdog;
|
||||
|
||||
static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
|
||||
{
|
||||
const struct acpi_table_wdat *wdat = NULL;
|
||||
acpi_status status;
|
||||
|
||||
if (acpi_disabled)
|
||||
if (acpi_disabled || acpi_no_watchdog)
|
||||
return NULL;
|
||||
|
||||
status = acpi_get_table(ACPI_SIG_WDAT, 0,
|
||||
|
@ -88,6 +90,14 @@ bool acpi_has_watchdog(void)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
|
||||
|
||||
/* ACPI watchdog can be disabled on boot command line */
|
||||
static int __init disable_acpi_watchdog(char *str)
|
||||
{
|
||||
acpi_no_watchdog = true;
|
||||
return 1;
|
||||
}
|
||||
__setup("acpi_no_watchdog", disable_acpi_watchdog);
|
||||
|
||||
void __init acpi_watchdog_init(void)
|
||||
{
|
||||
const struct acpi_wdat_entry *entries;
|
||||
|
@ -126,12 +136,11 @@ void __init acpi_watchdog_init(void)
|
|||
gas = &entries[i].register_region;
|
||||
|
||||
res.start = gas->address;
|
||||
res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
|
||||
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
|
||||
res.flags = IORESOURCE_MEM;
|
||||
res.end = res.start + ALIGN(gas->access_width, 4) - 1;
|
||||
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
|
||||
res.flags = IORESOURCE_IO;
|
||||
res.end = res.start + gas->access_width - 1;
|
||||
} else {
|
||||
pr_warn("Unsupported address space: %u\n",
|
||||
gas->space_id);
|
||||
|
|
|
@@ -853,14 +853,17 @@ static void reset_fdc_info(int mode)
 /* selects the fdc and drive, and enables the fdc's input/dma. */
 static void set_fdc(int drive)
 {
+	unsigned int new_fdc = fdc;
+
	if (drive >= 0 && drive < N_DRIVE) {
-		fdc = FDC(drive);
+		new_fdc = FDC(drive);
		current_drive = drive;
	}
-	if (fdc != 1 && fdc != 0) {
+	if (new_fdc >= N_FDC) {
		pr_info("bad fdc value\n");
		return;
	}
+	fdc = new_fdc;
	set_dor(fdc, ~0, 8);
 #if N_FDC > 1
	set_dor(1 - fdc, ~8, 0);

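The set_fdc() fix above computes the requested controller index into a local, validates it, and only then commits it to the global, so a bad drive number can no longer leave `fdc` out of range. A toy model of that validate-before-commit pattern (userspace sketch; the index mapping here is invented for the example):

```c
#include <stdio.h>

#define N_FDC 2
static int fdc;   /* global controller index, must stay in [0, N_FDC) */

/* Validate the requested controller before touching the global. */
static int set_fdc_checked(int drive)
{
	int new_fdc = fdc;

	if (drive >= 0)
		new_fdc = drive / 2;   /* stand-in for the FDC() mapping */
	if (new_fdc >= N_FDC)
		return -1;             /* reject without corrupting state */
	fdc = new_fdc;
	return 0;
}

int main(void)
{
	printf("%d fdc=%d\n", set_fdc_checked(1), fdc);    /* accepted */
	printf("%d fdc=%d\n", set_fdc_checked(99), fdc);   /* rejected, fdc unchanged */
	return 0;
}
```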
@@ -14,9 +14,6 @@
 #include <linux/fault-inject.h>

 struct nullb_cmd {
-	struct list_head list;
-	struct llist_node ll_list;
-	struct __call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;

@@ -1518,8 +1518,6 @@ static int setup_commands(struct nullb_queue *nq)

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
-		INIT_LIST_HEAD(&cmd->list);
-		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

@@ -276,7 +276,7 @@ static const struct block_device_operations pcd_bdops = {
	.release	= pcd_block_release,
	.ioctl		= pcd_block_ioctl,
 #ifdef CONFIG_COMPAT
-	.ioctl		= blkdev_compat_ptr_ioctl,
+	.compat_ioctl	= blkdev_compat_ptr_ioctl,
 #endif
	.check_events	= pcd_block_check_events,
 };

@@ -519,7 +519,7 @@ static const struct block_device_operations gdrom_bdops = {
	.check_events		= gdrom_bdops_check_events,
	.ioctl			= gdrom_bdops_ioctl,
 #ifdef CONFIG_COMPAT
-	.ioctl			= blkdev_compat_ptr_ioctl,
+	.compat_ioctl		= blkdev_compat_ptr_ioctl,
 #endif
 };

@@ -1076,9 +1076,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
			pol = policy->last_policy;
		} else if (def_gov) {
			pol = cpufreq_parse_policy(def_gov->name);
-		} else {
-			return -ENODATA;
+			/*
+			 * In case the default governor is neither "performance"
+			 * nor "powersave", fall back to the initial policy
+			 * value set by the driver.
+			 */
+			if (pol == CPUFREQ_POLICY_UNKNOWN)
+				pol = policy->policy;
		}
+		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
+		    pol != CPUFREQ_POLICY_POWERSAVE)
+			return -ENODATA;
	}

	return cpufreq_set_policy(policy, gov, pol);

@@ -738,7 +738,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
 {
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
-	static atomic_t devfreq_no = ATOMIC_INIT(-1);
	int err = 0;

	if (!dev || !profile || !governor_name) {

@@ -800,8 +799,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
	atomic_set(&devfreq->suspend_count, 0);

-	dev_set_name(&devfreq->dev, "devfreq%d",
-		     atomic_inc_return(&devfreq_no));
+	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		mutex_unlock(&devfreq->lock);

@@ -552,7 +552,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,

		seed = early_memremap(efi.rng_seed, sizeof(*seed));
		if (seed != NULL) {
-			size = seed->size;
+			size = READ_ONCE(seed->size);
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");

@@ -562,7 +562,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
				  sizeof(*seed) + size);
		if (seed != NULL) {
			pr_notice("seeding entropy pool\n");
-			add_bootloader_randomness(seed->bits, seed->size);
+			add_bootloader_randomness(seed->bits, size);
			early_memunmap(seed, sizeof(*seed) + size);
		} else {
			pr_err("Could not map UEFI random seed!\n");

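The change above fetches the firmware-provided size once with `READ_ONCE()` and reuses that snapshot for both the bounds of the mapping and the copy, instead of re-reading a field that could change between reads. A userspace sketch of the single-fetch idiom (a volatile read stands in for the kernel macro; the struct is invented for the example):

```c
#include <stdint.h>
#include <stdio.h>

/* Single-fetch pattern: read a shared/untrusted size once and use the
 * same snapshot for validation and consumption. */
#define READ_ONCE_U32(x) (*(volatile uint32_t *)&(x))

struct seed { uint32_t size; uint8_t bits[64]; };

static void consume(struct seed *s)
{
	uint32_t size = READ_ONCE_U32(s->size);   /* take one snapshot */

	if (size > sizeof(s->bits))
		return;                           /* validate the snapshot */
	for (uint32_t i = 0; i < size; i++)       /* consume the same snapshot */
		putchar(s->bits[i]);
}

int main(void)
{
	struct seed s = { 5, "hello" };
	consume(&s);
	putchar('\n');
	return 0;
}
```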
@@ -1389,7 +1389,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,

 static struct drm_driver kms_driver = {
	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_ATOMIC |
+	    DRIVER_ATOMIC |
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,

@@ -195,6 +195,7 @@ struct amdgpu_gmc {
	uint32_t srbm_soft_reset;
	bool prt_warning;
	uint64_t stolen_size;
+	uint32_t sdpif_register;
	/* apertures */
	u64 shared_aperture_start;
	u64 shared_aperture_end;

@@ -1271,6 +1271,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
	}
 }

+/**
+ * gmc_v9_0_restore_registers - restores regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This restores register values, saved at suspend.
+ */
+static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+{
+	if (adev->asic_type == CHIP_RAVEN)
+		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+}
+
 /**
  * gmc_v9_0_gart_enable - gart enable
  *

@@ -1376,6 +1389,20 @@ static int gmc_v9_0_hw_init(void *handle)
	return r;
 }

+/**
+ * gmc_v9_0_save_registers - saves regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves potential register values that should be
+ * restored upon resume
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
+{
+	if (adev->asic_type == CHIP_RAVEN)
+		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
+}
+
 /**
  * gmc_v9_0_gart_disable - gart disable
  *

@@ -1412,9 +1439,16 @@ static int gmc_v9_0_hw_fini(void *handle)

 static int gmc_v9_0_suspend(void *handle)
 {
+	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	return gmc_v9_0_hw_fini(adev);
+	r = gmc_v9_0_hw_fini(adev);
+	if (r)
+		return r;
+
+	gmc_v9_0_save_registers(adev);
+
+	return 0;
 }

 static int gmc_v9_0_resume(void *handle)

@@ -1422,6 +1456,7 @@ static int gmc_v9_0_resume(void *handle)
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+	gmc_v9_0_restore_registers(adev);
	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

@@ -7376,6 +7376,8 @@
 #define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
 #define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2

+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2

 // addressBlock: dce_dc_fmt4_dispdec
 // base address: 0x2000

@@ -978,8 +978,12 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

-	max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
+	if (!smu->smu_table.max_sustainable_clocks)
+		max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
					 GFP_KERNEL);
+	else
+		max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
+
	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;

@@ -294,7 +294,7 @@ extra-$(CONFIG_DRM_I915_WERROR) += \
	$(shell cd $(srctree)/$(src) && find * -name '*.h')))

 quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
-      cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+      cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@

 $(obj)/%.hdrtest: $(src)/%.h FORCE
	$(call if_changed_dep,hdrtest)

@@ -256,8 +256,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(i915, -1UL, NULL,
					I915_SHRINK_BOUND |
-					I915_SHRINK_UNBOUND |
-					I915_SHRINK_ACTIVE);
+					I915_SHRINK_UNBOUND);
	}

	return freed;

@@ -336,7 +335,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
-					       I915_SHRINK_ACTIVE |
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

@@ -151,12 +151,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
+				list_del(pos);
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
-				list_del(pos);
				break;
			}
		}

@@ -560,9 +560,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,

	intel_vgpu_reset_mmio(vgpu, dmlr);
	populate_pvinfo_page(vgpu);
-	intel_vgpu_reset_display(vgpu);

	if (dmlr) {
+		intel_vgpu_reset_display(vgpu);
		intel_vgpu_reset_cfg_space(vgpu);
		/* only reset the failsafe mode when dmlr reset */
		vgpu->failsafe = false;

@@ -437,7 +437,7 @@ static const struct intel_device_info snb_m_gt2_info = {
	.has_rc6 = 1, \
	.has_rc6p = 1, \
	.has_rps = true, \
-	.ppgtt_type = INTEL_PPGTT_FULL, \
+	.ppgtt_type = INTEL_PPGTT_ALIASING, \
	.ppgtt_size = 31, \
	IVB_PIPE_OFFSETS, \
	IVB_CURSOR_OFFSETS, \

@@ -494,7 +494,7 @@ static const struct intel_device_info vlv_info = {
	.has_rps = true,
	.display.has_gmch = 1,
	.display.has_hotplug = 1,
-	.ppgtt_type = INTEL_PPGTT_FULL,
+	.ppgtt_type = INTEL_PPGTT_ALIASING,
	.ppgtt_size = 31,
	.has_snoop = true,
	.has_coherent_ggtt = false,

@ -822,11 +822,6 @@ static ssize_t i915_pmu_event_show(struct device *dev,
|
|||
return sprintf(buf, "config=0x%lx\n", eattr->val);
|
||||
}
|
||||
|
||||
static struct attribute_group i915_pmu_events_attr_group = {
|
||||
.name = "events",
|
||||
/* Patch in attrs at runtime. */
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
i915_pmu_get_attr_cpumask(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
|
@ -846,13 +841,6 @@ static const struct attribute_group i915_pmu_cpumask_attr_group = {
|
|||
.attrs = i915_cpumask_attrs,
|
||||
};
|
||||
|
||||
static const struct attribute_group *i915_pmu_attr_groups[] = {
|
||||
&i915_pmu_format_attr_group,
|
||||
&i915_pmu_events_attr_group,
|
||||
&i915_pmu_cpumask_attr_group,
|
||||
NULL
|
||||
};
|
||||
|
||||
#define __event(__config, __name, __unit) \
|
||||
{ \
|
||||
.config = (__config), \
|
||||
|
@ -1026,23 +1014,23 @@ err_alloc:
|
|||
|
||||
static void free_event_attributes(struct i915_pmu *pmu)
|
||||
{
|
||||
struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
|
||||
struct attribute **attr_iter = pmu->events_attr_group.attrs;
|
||||
|
||||
for (; *attr_iter; attr_iter++)
|
||||
kfree((*attr_iter)->name);
|
||||
|
||||
kfree(i915_pmu_events_attr_group.attrs);
|
||||
kfree(pmu->events_attr_group.attrs);
|
||||
kfree(pmu->i915_attr);
|
||||
kfree(pmu->pmu_attr);
|
||||
|
||||
i915_pmu_events_attr_group.attrs = NULL;
|
||||
pmu->events_attr_group.attrs = NULL;
|
||||
pmu->i915_attr = NULL;
|
||||
pmu->pmu_attr = NULL;
|
||||
}
|
||||
|
||||
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
|
||||
{
|
||||
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
|
||||
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
|
||||
|
||||
GEM_BUG_ON(!pmu->base.event_init);
|
||||
|
||||
|
@ -1055,7 +1043,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
|
|||
|
||||
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
|
||||
{
|
||||
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
|
||||
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
|
||||
unsigned int target;
|
||||
|
||||
GEM_BUG_ON(!pmu->base.event_init);
|
||||
|
@ -1072,8 +1060,6 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
|
||||
|
||||
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
|
||||
{
|
||||
enum cpuhp_state slot;
|
||||
|
@ -1087,21 +1073,22 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
|
|||
return ret;
|
||||
|
||||
slot = ret;
|
||||
ret = cpuhp_state_add_instance(slot, &pmu->node);
|
||||
ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
|
||||
if (ret) {
|
||||
cpuhp_remove_multi_state(slot);
|
||||
return ret;
|
||||
}
|
||||
|
||||
cpuhp_slot = slot;
|
||||
pmu->cpuhp.slot = slot;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
|
||||
{
|
||||
WARN_ON(cpuhp_slot == CPUHP_INVALID);
|
||||
WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
|
||||
cpuhp_remove_multi_state(cpuhp_slot);
|
||||
WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID);
|
||||
WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
|
||||
cpuhp_remove_multi_state(pmu->cpuhp.slot);
|
||||
pmu->cpuhp.slot = CPUHP_INVALID;
|
||||
}
|
||||
|
||||
static bool is_igp(struct drm_i915_private *i915)
|
||||
|
@ -1118,6 +1105,13 @@ static bool is_igp(struct drm_i915_private *i915)
|
|||
void i915_pmu_register(struct drm_i915_private *i915)
|
||||
{
|
||||
struct i915_pmu *pmu = &i915->pmu;
|
||||
const struct attribute_group *attr_groups[] = {
|
||||
&i915_pmu_format_attr_group,
|
||||
&pmu->events_attr_group,
|
||||
&i915_pmu_cpumask_attr_group,
|
||||
NULL
|
||||
};
|
||||
|
||||
int ret = -ENOMEM;
|
||||
|
||||
if (INTEL_GEN(i915) <= 2) {
|
||||
|
@ -1128,6 +1122,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
|
|||
spin_lock_init(&pmu->lock);
|
||||
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
pmu->timer.function = i915_sample;
|
||||
pmu->cpuhp.slot = CPUHP_INVALID;
|
||||
|
||||
if (!is_igp(i915)) {
|
||||
pmu->name = kasprintf(GFP_KERNEL,
|
||||
|
@@ -1143,11 +1138,16 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	if (!pmu->name)
 		goto err;
 
-	i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
-	if (!i915_pmu_events_attr_group.attrs)
+	pmu->events_attr_group.name = "events";
+	pmu->events_attr_group.attrs = create_event_attributes(pmu);
+	if (!pmu->events_attr_group.attrs)
 		goto err_name;
 
-	pmu->base.attr_groups = i915_pmu_attr_groups;
+	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+					GFP_KERNEL);
+	if (!pmu->base.attr_groups)
+		goto err_attr;
+
 	pmu->base.task_ctx_nr = perf_invalid_context;
 	pmu->base.event_init = i915_pmu_event_init;
 	pmu->base.add = i915_pmu_event_add;
@@ -1159,7 +1159,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
 
 	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
 	if (ret)
-		goto err_attr;
+		goto err_groups;
 
 	ret = i915_pmu_register_cpuhp_state(pmu);
 	if (ret)
@@ -1169,6 +1169,8 @@ void i915_pmu_register(struct drm_i915_private *i915)
 
 err_unreg:
 	perf_pmu_unregister(&pmu->base);
+err_groups:
+	kfree(pmu->base.attr_groups);
 err_attr:
 	pmu->base.event_init = NULL;
 	free_event_attributes(pmu);
@@ -1194,6 +1196,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 
 	perf_pmu_unregister(&pmu->base);
 	pmu->base.event_init = NULL;
+	kfree(pmu->base.attr_groups);
 	if (!is_igp(i915))
 		kfree(pmu->name);
 	free_event_attributes(pmu);

drivers/gpu/drm/i915/i915_pmu.h
@@ -39,9 +39,12 @@ struct i915_pmu_sample {
 
 struct i915_pmu {
 	/**
-	 * @node: List node for CPU hotplug handling.
+	 * @cpuhp: Struct used for CPU hotplug handling.
 	 */
-	struct hlist_node node;
+	struct {
+		struct hlist_node node;
+		enum cpuhp_state slot;
+	} cpuhp;
 	/**
 	 * @base: PMU base.
 	 */
@@ -104,6 +107,10 @@ struct i915_pmu {
 	 * @sleep_last: Last time GT parked for RC6 estimation.
 	 */
 	ktime_t sleep_last;
+	/**
+	 * @events_attr_group: Device events attribute group.
+	 */
+	struct attribute_group events_attr_group;
 	/**
 	 * @i915_attr: Memory block holding device attributes.
 	 */

drivers/gpu/drm/radeon/radeon_drv.c
@@ -37,6 +37,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/mmu_notifier.h>
 
+#include <drm/drm_agpsupport.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_helper.h>
@@ -325,6 +326,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
 	unsigned long flags = 0;
+	struct drm_device *dev;
 	int ret;
 
 	if (!ent)
@@ -365,7 +367,44 @@ static int radeon_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;
 
-	return drm_get_pci_dev(pdev, ent, &kms_driver);
+	dev = drm_dev_alloc(&kms_driver, &pdev->dev);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto err_free;
+
+	dev->pdev = pdev;
+#ifdef __alpha__
+	dev->hose = pdev->sysdata;
+#endif
+
+	pci_set_drvdata(pdev, dev);
+
+	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+		dev->agp = drm_agp_init(dev);
+	if (dev->agp) {
+		dev->agp->agp_mtrr = arch_phys_wc_add(
+			dev->agp->agp_info.aper_base,
+			dev->agp->agp_info.aper_size *
+				1024 * 1024);
+	}
+
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_agp;
+
+	return 0;
+
+err_agp:
+	if (dev->agp)
+		arch_phys_wc_del(dev->agp->agp_mtrr);
+	kfree(dev->agp);
+	pci_disable_device(pdev);
+err_free:
+	drm_dev_put(dev);
+	return ret;
 }
 
 static void
@@ -575,7 +614,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
 
 static struct drm_driver kms_driver = {
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
+	    DRIVER_GEM | DRIVER_RENDER,
 	.load = radeon_driver_load_kms,
 	.open = radeon_driver_open_kms,
 	.postclose = radeon_driver_postclose_kms,

drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,6 +32,7 @@
 #include <linux/uaccess.h>
 #include <linux/vga_switcheroo.h>
 
+#include <drm/drm_agpsupport.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
@@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev)
 	radeon_modeset_fini(rdev);
 	radeon_device_fini(rdev);
 
+	if (dev->agp)
+		arch_phys_wc_del(dev->agp->agp_mtrr);
+	kfree(dev->agp);
+	dev->agp = NULL;
+
 done_free:
 	kfree(rdev);
 	dev->dev_private = NULL;

drivers/hid/hid-alps.c
@@ -730,7 +730,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
 	if (data->has_sp) {
 		input2 = input_allocate_device();
 		if (!input2) {
-			input_free_device(input2);
 			ret = -ENOMEM;
 			goto exit;
 		}

drivers/hid/hid-apple.c
@@ -340,7 +340,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 		unsigned long **bit, int *max)
 {
 	if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
-	    usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
+	    usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
+	    usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
 		/* The fn key on Apple USB keyboards */
 		set_bit(EV_REP, hi->input->evbit);
 		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);

drivers/hid/hid-bigben.c
@@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = {
 struct bigben_device {
 	struct hid_device *hid;
 	struct hid_report *report;
+	bool removed;
 	u8 led_state;         /* LED1 = 1 .. LED4 = 8 */
 	u8 right_motor_on;    /* right motor off/on 0/1 */
 	u8 left_motor_force;  /* left motor force 0-255 */
@@ -190,6 +191,9 @@ static void bigben_worker(struct work_struct *work)
 		struct bigben_device, worker);
 	struct hid_field *report_field = bigben->report->field[0];
 
+	if (bigben->removed)
+		return;
+
 	if (bigben->work_led) {
 		bigben->work_led = false;
 		report_field->value[0] = 0x01; /* 1 = led message */
@@ -220,10 +224,16 @@ static void bigben_worker(struct work_struct *work)
 static int hid_bigben_play_effect(struct input_dev *dev, void *data,
 			 struct ff_effect *effect)
 {
-	struct bigben_device *bigben = data;
+	struct hid_device *hid = input_get_drvdata(dev);
+	struct bigben_device *bigben = hid_get_drvdata(hid);
 	u8 right_motor_on;
 	u8 left_motor_force;
 
+	if (!bigben) {
+		hid_err(hid, "no device data\n");
+		return 0;
+	}
+
 	if (effect->type != FF_RUMBLE)
 		return 0;
 
@@ -298,8 +308,8 @@ static void bigben_remove(struct hid_device *hid)
 {
 	struct bigben_device *bigben = hid_get_drvdata(hid);
 
+	bigben->removed = true;
 	cancel_work_sync(&bigben->worker);
-	hid_hw_close(hid);
 	hid_hw_stop(hid);
 }
 
@@ -319,6 +329,7 @@ static int bigben_probe(struct hid_device *hid,
 		return -ENOMEM;
 	hid_set_drvdata(hid, bigben);
 	bigben->hid = hid;
+	bigben->removed = false;
 
 	error = hid_parse(hid);
 	if (error) {
@@ -341,10 +352,10 @@ static int bigben_probe(struct hid_device *hid,
 
 	INIT_WORK(&bigben->worker, bigben_worker);
 
-	error = input_ff_create_memless(hidinput->input, bigben,
+	error = input_ff_create_memless(hidinput->input, NULL,
 					hid_bigben_play_effect);
 	if (error)
-		return error;
+		goto error_hw_stop;
 
 	name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1;
 
@@ -354,8 +365,10 @@ static int bigben_probe(struct hid_device *hid,
 		sizeof(struct led_classdev) + name_sz,
 		GFP_KERNEL
 	);
-	if (!led)
-		return -ENOMEM;
+	if (!led) {
+		error = -ENOMEM;
+		goto error_hw_stop;
+	}
 	name = (void *)(&led[1]);
 	snprintf(name, name_sz,
 		 "%s:red:bigben%d",
@@ -369,7 +382,7 @@ static int bigben_probe(struct hid_device *hid,
 		bigben->leds[n] = led;
 		error = devm_led_classdev_register(&hid->dev, led);
 		if (error)
-			return error;
+			goto error_hw_stop;
 	}
 
 	/* initial state: LED1 is on, no rumble effect */
@@ -383,6 +396,10 @@ static int bigben_probe(struct hid_device *hid,
 	hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
 
 	return 0;
+
+error_hw_stop:
+	hid_hw_stop(hid);
+	return error;
 }
 
 static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,

drivers/hid/hid-core.c
@@ -1741,7 +1741,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
 
 	rsize = ((report->size - 1) >> 3) + 1;
 
-	if (rsize > HID_MAX_BUFFER_SIZE)
+	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
+		rsize = HID_MAX_BUFFER_SIZE - 1;
+	else if (rsize > HID_MAX_BUFFER_SIZE)
 		rsize = HID_MAX_BUFFER_SIZE;
 
 	if (csize < rsize) {

drivers/hid/hid-ite.c
@@ -41,8 +41,9 @@ static const struct hid_device_id ite_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
 	/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
-	{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
-		 USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
+	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+		     USB_VENDOR_ID_SYNAPTICS,
+		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);

drivers/hid/hid-logitech-hidpp.c
@@ -1256,36 +1256,35 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
 {
 	int status;
 
-	long charge_sts = (long)data[2];
+	long flags = (long) data[2];
 
 	*level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
-	switch (data[2] & 0xe0) {
-	case 0x00:
-		status = POWER_SUPPLY_STATUS_CHARGING;
-		break;
-	case 0x20:
-		status = POWER_SUPPLY_STATUS_FULL;
-		*level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
-		break;
-	case 0x40:
+	if (flags & 0x80)
+		switch (flags & 0x07) {
+		case 0:
+			status = POWER_SUPPLY_STATUS_CHARGING;
+			break;
+		case 1:
+			status = POWER_SUPPLY_STATUS_FULL;
+			*level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+			break;
+		case 2:
+			status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+			break;
+		default:
+			status = POWER_SUPPLY_STATUS_UNKNOWN;
+			break;
+		}
+	else
 		status = POWER_SUPPLY_STATUS_DISCHARGING;
-		break;
-	case 0xe0:
-		status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-		break;
-	default:
-		status = POWER_SUPPLY_STATUS_UNKNOWN;
-	}
 
 	*charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
-	if (test_bit(3, &charge_sts)) {
+	if (test_bit(3, &flags)) {
 		*charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
 	}
-	if (test_bit(4, &charge_sts)) {
+	if (test_bit(4, &flags)) {
 		*charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
 	}
-
-	if (test_bit(5, &charge_sts)) {
+	if (test_bit(5, &flags)) {
 		*level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
 	}
Some files were not shown because too many files have changed in this diff.