Merge branch 'from-rusty/modules-next' into for-4.5/core

As agreed with Rusty, we're taking the current modules-next pile through livepatching.git, as it contains solely patches that are a prerequisite for the module page protection cleanups in livepatching. Rusty will be restarting modules-next from scratch.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>

commit fc284d6318
@@ -587,7 +587,7 @@ used to control it:
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x ifnum_to_use=n
+      nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>
 
 ifnum_to_use specifies which interface the watchdog timer should use.
 The default is -1, which means to pick the first one registered.
@@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will
 occur (if pretimeout is zero, then pretimeout will not be enabled).  Note
 that the pretimeout is the time before the final timeout.  So if the
 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
-will occur in 40 second (10 seconds before the timeout).
+will occur in 40 second (10 seconds before the timeout). The panic_wdt_timeout
+is the value of timeout which is set on kernel panic, in order to let actions
+such as kdump to occur during panic.
 
 The action may be "reset", "power_cycle", or "power_off", and
 specifies what to do when the timer times out, and defaults to
@@ -634,6 +636,7 @@ for configuring the watchdog:
 	ipmi_watchdog.preop=<preop type>
 	ipmi_watchdog.start_now=x
 	ipmi_watchdog.nowayout=x
+	ipmi_watchdog.panic_wdt_timeout=<t>
 
 The options are the same as the module parameter options.
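[Editorial worked example of the pretimeout arithmetic in the documentation above; the values are illustrative, not from the patch:

    modprobe ipmi_watchdog timeout=50 pretimeout=10 panic_wdt_timeout=120

arms a 50-second reset timer whose pre-timeout action fires 10 seconds before the reset, i.e. at the 40-second mark, and on a kernel panic re-arms the timer to 120 seconds so an action such as kdump has time to complete.]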
@@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0
     parameter.
  1: The multi-queue block layer is instantiated with a hardware dispatch
     queue for each CPU node in the system.
+
+use_lightnvm=[0/1]: Default: 0
+  Register device with LightNVM. Requires blk-mq to be used.
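[Editorial note — an illustrative invocation of the new parameter, assuming null_blk's standard queue_mode parameter where 2 selects blk-mq:

    modprobe null_blk queue_mode=2 use_lightnvm=1
]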
@@ -32,6 +32,7 @@ Supported adapters:
   * Intel Sunrise Point-LP (PCH)
   * Intel DNV (SOC)
   * Intel Broxton (SOC)
+  * Intel Lewisburg (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
@@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	hwp_only
 		Only load intel_pstate on systems which support
 		hardware P state control (HWP) if available.
-	no_acpi
-		Don't use ACPI processor performance control objects
-		_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 		on	enable Interrupt Remapping (default)
MAINTAINERS (19 changed lines):
@@ -2449,7 +2449,9 @@ F:	drivers/firmware/broadcom/*
 
 BROADCOM STB NAND FLASH DRIVER
 M:	Brian Norris <computersforpeace@gmail.com>
+M:	Kamal Dasu <kdasu.kdev@gmail.com>
 L:	linux-mtd@lists.infradead.org
+L:	bcm-kernel-feedback-list@broadcom.com
 S:	Maintained
 F:	drivers/mtd/nand/brcmnand/
 
@@ -2929,10 +2931,9 @@ S:	Maintained
 F:	drivers/platform/x86/compal-laptop.c
 
 CONEXANT ACCESSRUNNER USB DRIVER
-M:	Simon Arlott <cxacru@fire.lp0.eu>
 L:	accessrunner-general@lists.sourceforge.net
 W:	http://accessrunner.sourceforge.net/
-S:	Maintained
+S:	Orphan
 F:	drivers/usb/atm/cxacru.c
 
 CONFIGFS
@@ -4409,6 +4410,7 @@ K:	fmc_d.*register
 
 FPGA MANAGER FRAMEWORK
 M:	Alan Tull <atull@opensource.altera.com>
+R:	Moritz Fischer <moritz.fischer@ettus.com>
 S:	Maintained
 F:	drivers/fpga/
 F:	include/linux/fpga/fpga-mgr.h
@@ -6364,6 +6366,7 @@ F:	arch/*/include/asm/pmem.h
 LIGHTNVM PLATFORM SUPPORT
 M:	Matias Bjorling <mb@lightnvm.io>
 W:	http://github/OpenChannelSSD
+L:	linux-block@vger.kernel.org
 S:	Maintained
 F:	drivers/lightnvm/
 F:	include/linux/lightnvm.h
 
@@ -7902,6 +7905,18 @@ S:	Maintained
 F:	net/openvswitch/
 F:	include/uapi/linux/openvswitch.h
 
+OPERATING PERFORMANCE POINTS (OPP)
+M:	Viresh Kumar <vireshk@kernel.org>
+M:	Nishanth Menon <nm@ti.com>
+M:	Stephen Boyd <sboyd@codeaurora.org>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
+F:	drivers/base/power/opp/
+F:	include/linux/pm_opp.h
+F:	Documentation/power/opp.txt
+F:	Documentation/devicetree/bindings/opp/
+
 OPL4 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
Makefile (2 changed lines):
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
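[Editorial note: these four fields combine into the kernel release string, so this hunk bumps the tree from 4.4.0-rc1 to 4.4.0-rc2 as reported by "make kernelversion".]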
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
 
 	/* The small sections were sorted to the end of the segment.
 	   The following should definitely cover them.  */
-	gp = (u64)me->module_core + me->core_size - 0x8000;
+	gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
 	got = sechdrs[me->arch.gotsecindex].sh_addr;
 
 	for (i = 0; i < n; i++) {
@@ -372,8 +372,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
 		return NULL;
 
 	init_unwind_table(table, module->name,
-			  module->module_core, module->core_size,
-			  module->module_init, module->init_size,
+			  module->core_layout.base, module->core_layout.size,
+			  module->init_layout.base, module->init_layout.size,
 			  table_start, table_size,
 			  NULL, 0);
 
@@ -486,7 +486,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024000 0x200>;
 				interrupts = <56>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 0>;
 				status = "disabled";
 			};
@@ -495,7 +498,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024200 0x200>;
 				interrupts = <54>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 1>;
 				dr_mode = "host";
 				status = "disabled";
@@ -505,7 +511,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024400 0x200>;
 				interrupts = <55>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 2>;
 				dr_mode = "host";
 				status = "disabled";
@@ -515,7 +524,6 @@
 				#index-cells = <1>;
 				compatible = "fsl,imx27-usbmisc";
 				reg = <0x10024600 0x200>;
-				clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
 			};
 
 			sahara2: sahara@10025000 {
@@ -32,7 +32,7 @@ struct plt_entries {
 
 static bool in_init(const struct module *mod, u32 addr)
 {
-	return addr - (u32)mod->module_init < mod->init_size;
+	return addr - (u32)mod->init_layout.base < mod->init_layout.size;
 }
 
 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
@@ -563,18 +563,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		if (vcpu->arch.power_off || vcpu->arch.pause)
 			vcpu_sleep(vcpu);
 
-		/*
-		 * Disarming the background timer must be done in a
-		 * preemptible context, as this call may sleep.
-		 */
-		kvm_timer_flush_hwstate(vcpu);
-
 		/*
 		 * Preparing the interrupts to be injected also
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 	__kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:	pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) &&
-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:	pointer to pmd entry
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
 static struct crypto_alg aes_alg = {
 	.cra_name		= "aes",
 	.cra_driver_name	= "aes-ce",
-	.cra_priority		= 300,
+	.cra_priority		= 250,
 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
@@ -64,27 +64,31 @@ do {									\
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1;						\
+	union { typeof(*p) __val; char __c[1]; } __u;			\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("ldarb %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u8 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("ldarh %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u16 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("ldar %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u32 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 8:								\
 		asm volatile ("ldar %0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u64 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	}								\
-	___p1;								\
+	__u.__val;							\
 })
 
 #define read_barrier_depends()		do { } while(0)
@@ -23,7 +23,6 @@
  */
 #include <linux/types.h>
-#include <linux/sched.h>
 #include <linux/ptrace.h>
 
 #define COMPAT_USER_HZ		100
 #ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN				4
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
 
-#define ARM64_NCAPS				7
+#define ARM64_NCAPS				8
 
 #ifndef __ASSEMBLY__
 
@@ -18,7 +18,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
@@ -26,22 +25,16 @@
 #include <asm/xen/hypervisor.h>
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (unlikely(!dev))
-		return dma_ops;
-	else if (dev->archdata.dma_ops)
+	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
-	else if (acpi_disabled)
-		return dma_ops;
 
 	/*
-	 * When ACPI is enabled, if arch_set_dma_ops is not called,
-	 * we will disable device DMA capability by setting it
-	 * to dummy_dma_ops.
+	 * We expect no ISA devices, and all other DMA masters are expected to
+	 * have someone call arch_setup_dma_ops at device creation time.
 	 */
 	return &dummy_dma_ops;
 }
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
+	if (vcpu_mode_is_32bit(vcpu))
+		return vcpu_reg32(vcpu, reg_num);
+
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
 #define destroy_context(mm)		do { } while(0)
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   (1 << MIDR_VARIANT_SHIFT) | 2),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+	{
+	/* Cortex-A57 r0p0 - r1p2 */
+		.desc = "ARM erratum 834220",
+		.capability = ARM64_WORKAROUND_834220,
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
 	/* Cortex-A53 r0p[01234] */
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
 
+		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+			   loops_per_jiffy / (500000UL/HZ),
+			   loops_per_jiffy / (5000UL/HZ) % 100);
+
 		/*
 		 * Dump out the common processor features in a single line.
 		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
@@ -224,6 +224,8 @@ static bool __init efi_virtmap_init(void)
 {
 	efi_memory_desc_t *md;
 
+	init_new_context(NULL, &efi_mm);
+
 	for_each_efi_memory_desc(&memmap, md) {
 		u64 paddr, npages, size;
 		pgprot_t prot;
@@ -254,7 +256,8 @@ static bool __init efi_virtmap_init(void)
 		else
 			prot = PAGE_KERNEL;
 
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+				   __pgprot(pgprot_val(prot) | PTE_NG));
 	}
 	return true;
 }
@@ -329,14 +332,7 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	if (mm == &init_mm)
-		cpu_set_reserved_ttbr0();
-	else
-		cpu_switch_mm(mm->pgd, mm);
-
-	local_flush_tlb_all();
-	if (icache_is_aivivt())
-		__local_flush_icache_all();
+	switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void)
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -70,6 +71,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 */
 	local_dbg_save(flags);
 
+	/*
+	 * Function graph tracer state gets incosistent when the kernel
+	 * calls functions that never return (aka suspend finishers) hence
+	 * disable graph tracing during their execution.
+	 */
+	pause_graph_tracing();
+
 	/*
 	 * mm context saved on the stack, it will be restored when
 	 * the cpu comes out of reset through the identity mapped
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 			hw_breakpoint_restore(NULL);
 	}
 
+	unpause_graph_tracing();
+
 	/*
 	 * Restore pstate flags. OS lock and mdscr have been already
 	 * restored, so from this point onwards, debugging is fully
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
 ENDPROC(__kvm_flush_vm_context)
 
 __kvm_hyp_panic:
+	// Stash PAR_EL1 before corrupting it in __restore_sysregs
+	mrs	x0, par_el1
+	push	x0, xzr
+
 	// Guess the context by looking at VTTBR:
 	// If zero, then we're already a host.
 	// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
 	mrs	x3, esr_el2
 	mrs	x4, far_el2
 	mrs	x5, hpfar_el2
-	mrs	x6, par_el1
+	pop	x6, xzr		// active context PAR_EL1
 	mrs	x7, tpidr_el2
 
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
 ENDPROC(__kvm_hyp_panic)
 
 __hyp_panic_str:
-	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
 
 	.align	2
 
@@ -1015,9 +1019,15 @@ el1_trap:
 	b.ne	1f		// Not an abort we care about
 
 	/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
 	and	x2, x1, #ESR_ELx_FSC_TYPE
 	cmp	x2, #FSC_PERM
 	b.ne	1f		// Not a permission fault
+alternative_else
+	nop			// Use the permission fault path to
+	nop			// check for a valid S1 translation,
+	nop			// regardless of the ESR value.
+alternative_endif
 
 	/*
 	 * Check for Stage-1 page table walk, which is guaranteed
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 	/* Note: These now point to the banked copies */
 	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	/* Branch to exception vector */
 	if (sctlr & (1 << 13))
@@ -18,6 +18,7 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 
 #include <asm/cacheflush.h>
 
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 				 bool coherent)
 {
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-	int ret;
-
-	dma_ops = &swiotlb_dma_ops;
-
-	ret = atomic_pool_init();
-
-	return ret;
+	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
 
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	size_t iosize = size;
 	void *addr;
 
 	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
 		return NULL;
+
+	size = PAGE_ALIGN(size);
+
 	/*
 	 * Some drivers rely on this, and we probably don't want the
 	 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		struct page **pages;
 		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-		pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
 					flush_page);
 		if (!pages)
 			return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 					      __builtin_return_address(0));
 		if (!addr)
-			iommu_dma_free(dev, pages, size, handle);
+			iommu_dma_free(dev, pages, iosize, handle);
 	} else {
 		struct page *page;
 		/*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		if (!addr)
 			return NULL;
 
-		*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
 		if (iommu_dma_mapping_error(dev, *handle)) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			       dma_addr_t handle, struct dma_attrs *attrs)
 {
+	size_t iosize = size;
+
+	size = PAGE_ALIGN(size);
 	/*
 	 * @cpu_addr will be one of 3 things depending on how it was allocated:
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 * Hence how dodgy the below logic looks...
 	 */
 	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_from_pool(cpu_addr, size);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
 
 		if (WARN_ON(!area || !area->pages))
 			return;
-		iommu_dma_free(dev, area->pages, size, &handle);
+		iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			struct iommu_ops *iommu, bool coherent)
 {
-	if (!acpi_disabled && !dev->archdata.dma_ops)
-		dev->archdata.dma_ops = dma_ops;
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = &swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
@@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 	 * for now. This will get more fine grained later once all memory
 	 * is mapped
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -451,18 +451,18 @@ static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_start = round_down(__pa(_stext),
-							 SECTION_SIZE);
+							 SWAPPER_BLOCK_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
 			       __pa(_stext) - aligned_start,
 			       PAGE_KERNEL);
 	}
 
-	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_end = round_up(__pa(__init_end),
-							  SECTION_SIZE);
+							  SWAPPER_BLOCK_SIZE);
 		create_mapping(__pa(__init_end), (unsigned long)__init_end,
 			       aligned_end - __pa(__init_end),
 			       PAGE_KERNEL);
@@ -475,7 +475,7 @@ void mark_rodata_ro(void)
 {
 	create_mapping_late(__pa(_stext), (unsigned long)_stext,
 				(unsigned long)_etext - (unsigned long)_stext,
-				PAGE_KERNEL_EXEC | PTE_RDONLY);
+				PAGE_KERNEL_ROX);
 
 }
 #endif
@@ -118,9 +118,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 	 * Increase core size to make room for GOT and set start
 	 * offset for GOT.
 	 */
-	module->core_size = ALIGN(module->core_size, 4);
-	module->arch.got_offset = module->core_size;
-	module->core_size += module->arch.got_size;
+	module->core_layout.size = ALIGN(module->core_layout.size, 4);
+	module->arch.got_offset = module->core_layout.size;
+	module->core_layout.size += module->arch.got_size;
 
 	return 0;
 
@@ -177,7 +177,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 		if (!info->got_initialized) {
 			Elf32_Addr *gotent;
 
-			gotent = (module->module_core
+			gotent = (module->core_layout.base
 				  + module->arch.got_offset
 				  + info->got_offset);
 			*gotent = relocation;
@@ -255,8 +255,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 			 */
 			pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n",
 				 relocation, module->arch.got_offset,
-				 module->module_core);
-			relocation -= ((unsigned long)module->module_core
+				 module->core_layout.base);
+			relocation -= ((unsigned long)module->core_layout.base
 				       + module->arch.got_offset);
 			*location = relocation;
 			break;
@@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
 static inline int
 in_init (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->module_init < mod->init_size;
+	return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
 }
 
 static inline int
 in_core (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->module_core < mod->core_size;
+	return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
 }
 
 static inline int
@@ -675,7 +675,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 		break;
 
 	      case RV_BDREL:
-		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
+		val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
 		break;
 
 	      case RV_LTV:
@@ -810,15 +810,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 		 * addresses have been selected...
 		 */
 		uint64_t gp;
-		if (mod->core_size > MAX_LTOFF)
+		if (mod->core_layout.size > MAX_LTOFF)
 			/*
 			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
 			 * at the end of the module.
 			 */
-			gp = mod->core_size - MAX_LTOFF / 2;
+			gp = mod->core_layout.size - MAX_LTOFF / 2;
 		else
-			gp = mod->core_size / 2;
-		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
+			gp = mod->core_layout.size / 2;
+		gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
 		mod->arch.gp = gp;
 		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
 	}
@@ -176,8 +176,8 @@ static uint32_t do_plt_call(void *location, Elf32_Addr val,
 	tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3);
 
 	/* Init, or core PLT? */
-	if (location >= mod->module_core
-	    && location < mod->module_core + mod->core_size)
+	if (location >= mod->core_layout.base
+	    && location < mod->core_layout.base + mod->core_layout.size)
 		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
 	else
 		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
@@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
 					 AR71XX_RESET_SIZE);
 	ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
 					 AR71XX_PLL_SIZE);
+	ath79_detect_sys_type();
 	ath79_ddr_ctrl_init();
 
-	ath79_detect_sys_type();
 	if (mips_machtype != ATH79_MACH_GENERIC_OF)
 		detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
 
@@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC,
 	     "Generic",
 	     "Generic AR71XX/AR724X/AR913X based board",
 	     ath79_generic_init);
+
+MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
+	     "DTB",
+	     "Generic AR71XX/AR724X/AR913X based board (DT)",
+	     NULL);
@@ -107,7 +107,7 @@
 		miscintc: interrupt-controller@18060010 {
 			compatible = "qca,ar9132-misc-intc",
 				   "qca,ar7100-misc-intc";
-			reg = <0x18060010 0x4>;
+			reg = <0x18060010 0x8>;
 
 			interrupt-parent = <&cpuintc>;
 			interrupts = <6>;
@@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn)
 {
 	/* avoid <linux/mm.h> include hell */
 	extern unsigned long max_mapnr;
+	unsigned long pfn_offset = ARCH_PFN_OFFSET;
 
-	return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
+	return pfn >= pfn_offset && pfn < max_mapnr;
 }
 
 #elif defined(CONFIG_SPARSEMEM)
@@ -205,11 +205,11 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
 			    || s->sh_entsize != ~0UL)
 				continue;
 			s->sh_entsize =
-				get_offset((unsigned long *)&mod->core_size, s);
+				get_offset((unsigned long *)&mod->core_layout.size, s);
 		}
 
 		if (m == 0)
-			mod->core_text_size = mod->core_size;
+			mod->core_layout.text_size = mod->core_layout.size;
 
 	}
 }
@@ -641,7 +641,7 @@ static int vpe_elfload(struct vpe *v)
 		layout_sections(&mod, hdr, sechdrs, secstrings);
 	}
 
	v->load_addr = alloc_progmem(mod.core_layout.size);
 	if (!v->load_addr)
 		return -ENOMEM;
@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 
 	base = (inst >> 21) & 0x1f;
 	op_inst = (inst >> 16) & 0x1f;
-	offset = inst & 0xffff;
+	offset = (int16_t)inst;
 	cache = (inst >> 16) & 0x3;
 	op = (inst >> 18) & 0x7;
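[Editorial note: the hunk above swaps a zero-extending mask for a sign-extending cast — the cache-op immediate is a signed 16-bit field, so masking with 0xffff mangles negative offsets. A minimal stand-alone C sketch of the difference, with an illustrative encoding that is not from the kernel:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t inst = 0xbc85fff0;     /* hypothetical instruction word; low 16 bits encode -16 */
        int32_t masked = inst & 0xffff; /* zero-extends: yields 65520, wrong */
        int32_t casted = (int16_t)inst; /* sign-extends: yields -16, correct */
        printf("masked=%d casted=%d\n", masked, casted);
        return 0;
    }
]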
@@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run)
 
 FEXPORT(__kvm_mips_load_asid)
 	/* Set the ASID for the Guest Kernel */
-	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
-				/* addresses shift to 0x80000000 */
-	bltz	t0, 1f		/* If kernel */
+	PTR_L	t0, VCPU_COP0(k1)
+	LONG_L	t0, COP0_STATUS(t0)
+	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
+	xori	t0, KSU_USER
+	bnez	t0, 1f		/* If kernel */
 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
 1:
@@ -474,9 +476,11 @@ __kvm_mips_return_to_guest:
 	mtc0	t0, CP0_EPC
 
 	/* Set the ASID for the Guest Kernel */
-	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
-				/* addresses shift to 0x80000000 */
-	bltz	t0, 1f		/* If kernel */
+	PTR_L	t0, VCPU_COP0(k1)
+	LONG_L	t0, COP0_STATUS(t0)
+	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
+	xori	t0, KSU_USER
+	bnez	t0, 1f		/* If kernel */
 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
 1:
@@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 
 	if (!gebase) {
 		err = -ENOMEM;
-		goto out_free_cpu;
+		goto out_uninit_cpu;
 	}
 	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 		  ALIGN(size, PAGE_SIZE), gebase);
@@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 out_free_gebase:
 	kfree(gebase);
 
+out_uninit_cpu:
+	kvm_vcpu_uninit(vcpu);
+
 out_free_cpu:
 	kfree(vcpu);
 
@@ -108,6 +108,9 @@ config PGTABLE_LEVELS
 	default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
 	default 2
 
+config SYS_SUPPORTS_HUGETLBFS
+	def_bool y if PA20
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -0,0 +1,85 @@
+#ifndef _ASM_PARISC64_HUGETLB_H
+#define _ASM_PARISC64_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr,
+					 unsigned long len) {
+	return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+					  unsigned long addr, unsigned long end,
+					  unsigned long floor,
+					  unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	pte_t old_pte = *ptep;
+	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	int changed = !pte_same(*ptep, pte);
+	if (changed) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_page(vma, addr);
+	}
+	return changed;
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_PARISC64_HUGETLB_H */
@@ -145,11 +145,22 @@ extern int npmem_ranges;
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SHIFT		22	/* 4MB (is this fixed?) */
+#define HPAGE_SHIFT		PMD_SHIFT /* fixed for transparent huge pages */
 #define HPAGE_SIZE      	((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT	20 /* 20 = 1MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
+#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT	22 /* 22 = 4MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
+#else
+# define REAL_HPAGE_SHIFT	24 /* 24 = 16MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
+#endif
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 			PxD_FLAG_VALID |
 			PxD_FLAG_ATTACHED)
 			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
-		/* The first pmd entry also is marked with _PAGE_GATEWAY as
+		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
 		 * a signal that this pmd may not be freed */
 		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
 #endif
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
 
 /* This is the size of the initially mapped kernel memory */
-#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
+#ifdef CONFIG_64BIT
+#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
+#else
+#define KERNEL_INITIAL_ORDER	24	/* 1<<24 = 16MB */
+#endif
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
 #if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
 #define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
 #define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
-/* bit 21 was formerly the FLUSH bit but is now unused */
+#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
 #define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
 
 /* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
 #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
 #define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
+#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
 #define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
 
 #define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
-#define PxD_VALUE_SHIFT   (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
+#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -362,6 +367,18 @@ static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; ret
 static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 
+/*
+ * Huge pte definitions.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
+#define pte_mkhuge(pte)         (__pte(pte_val(pte) | _PAGE_HUGE))
+#else
+#define pte_huge(pte)           (0)
+#define pte_mkhuge(pte)         (pte)
+#endif
+
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 /* Find an entry in the second-level page table.. */
 
 #if CONFIG_PGTABLE_LEVELS == 3
+#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 #define pmd_offset(dir,address) \
-((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
+((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
 #else
 #define pmd_offset(dir,addr) ((pmd_t *) dir)
 #endif
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
  */
 typedef unsigned int elf_caddr_t;
 
-#define start_thread_som(regs, new_pc, new_sp) do {	\
-	unsigned long *sp = (unsigned long *)new_sp;	\
-	__u32 spaceid = (__u32)current->mm->context;	\
-	unsigned long pc = (unsigned long)new_pc;	\
-	/* offset pc for priv. level */			\
-	pc |= 3;					\
-							\
-	regs->iasq[0] = spaceid;			\
-	regs->iasq[1] = spaceid;			\
-	regs->iaoq[0] = pc;				\
-	regs->iaoq[1] = pc + 4;				\
-	regs->sr[2] = LINUX_GATEWAY_SPACE;		\
-	regs->sr[3] = 0xffff;				\
-	regs->sr[4] = spaceid;				\
-	regs->sr[5] = spaceid;				\
-	regs->sr[6] = spaceid;				\
-	regs->sr[7] = spaceid;				\
-	regs->gr[ 0] = USER_PSW;			\
-	regs->gr[30] = ((new_sp)+63)&~63;		\
-	regs->gr[31] = pc;				\
-							\
-	get_user(regs->gr[26],&sp[0]);			\
-	get_user(regs->gr[25],&sp[-1]);			\
-	get_user(regs->gr[24],&sp[-2]);			\
-	get_user(regs->gr[23],&sp[-3]);			\
-} while(0)
-
 /* The ELF abi wants things done a "wee bit" differently than
  * som does.  Supporting this behavior here avoids
  * having our own version of create_elf_tables.
@@ -49,16 +49,6 @@
 #define MADV_DONTFORK	10		/* don't inherit across fork */
 #define MADV_DOFORK	11		/* do inherit across fork */
 
-/* The range 12-64 is reserved for page size specification. */
-#define MADV_4K_PAGES   12              /* Use 4K pages  */
-#define MADV_16K_PAGES  14              /* Use 16K pages */
-#define MADV_64K_PAGES  16              /* Use 64K pages */
-#define MADV_256K_PAGES 18              /* Use 256K pages */
-#define MADV_1M_PAGES   20              /* Use 1 Megabyte pages */
-#define MADV_4M_PAGES   22              /* Use 4 Megabyte pages */
-#define MADV_16M_PAGES  24              /* Use 16 Megabyte pages */
-#define MADV_64M_PAGES  26              /* Use 64 Megabyte pages */
-
 #define MADV_MERGEABLE   65		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 66		/* KSM may not merge identical pages */
 
@@ -289,6 +289,14 @@ int main(void)
 	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
 	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
 	BLANK();
+	/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
+	 * and kernel data on physical huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+	DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#else
+	DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
+#endif
+	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
@@ -502,21 +502,38 @@
 	STREG		\pte,0(\ptp)
 	.endm
 
+	/* We have (depending on the page size):
+	 * - 38 to 52-bit Physical Page Number
+	 * - 12 to 26-bit page offset
+	 */
 	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
 	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
-	#define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
+	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
+	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
 
 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-	.macro		convert_for_tlb_insert20 pte
+	.macro		convert_for_tlb_insert20 pte,tmp
+#ifdef CONFIG_HUGETLB_PAGE
+	copy		\pte,\tmp
+	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+
+	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
+				(63-58)+PAGE_ADD_SHIFT,\pte
+	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
+	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+#else /* Huge pages disabled */
 	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
 				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
 	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
 				(63-58)+PAGE_ADD_SHIFT,\pte
+#endif
 	.endm
 
 	/* Convert the pte and prot to tlb insertion values.  How
 	 * this happens is quite subtle, read below */
-	.macro		make_insert_tlb	spc,pte,prot
+	.macro		make_insert_tlb	spc,pte,prot,tmp
 	space_to_prot   \spc \prot        /* create prot id from space */
 	/* The following is the real subtlety.  This is depositing
 	 * T <-> _PAGE_REFTRAP
@@ -553,7 +570,7 @@
 	depdi		1,12,1,\prot
 
 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-	convert_for_tlb_insert20 \pte
+	convert_for_tlb_insert20 \pte \tmp
 	.endm
 
 	/* Identical macro to make_insert_tlb above, except it
@@ -646,17 +663,12 @@
 
 
 	/*
-	 * Align fault_vector_20 on 4K boundary so that both
-	 * fault_vector_11 and fault_vector_20 are on the
-	 * same page. This is only necessary as long as we
-	 * write protect the kernel text, which we may stop
-	 * doing once we use large page translations to cover
-	 * the static part of the kernel address space.
+	 * Fault_vectors are architecturally required to be aligned on a 2K
+	 * boundary
 	 */
 
 	.text
 
-	.align 4096
+	.align 2048
 
 ENTRY(fault_vector_20)
 	/* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 	
 	idtlbt          pte,prot
 
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 	
 	iitlbt          pte,prot
 
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt          pte,prot
 
@@ -1514,7 +1526,7 @@ itlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 		
 	idtlbt          pte,prot
 
@@ -1610,7 +1622,7 @@ dbit_trap_20:
 	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 	
@@ -69,7 +69,7 @@ $bss_loop:
 	stw,ma          %arg2,4(%r1)
 	stw,ma          %arg3,4(%r1)
 
-	/* Initialize startup VM. Just map first 8/16 MB of memory */
+	/* Initialize startup VM. Just map first 16/32 MB of memory */
 	load32		PA(swapper_pg_dir),%r4
 	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 	mtctl		%r4,%cr25	/* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
 	/* Now initialize the PTEs themselves.  We use RWX for
 	 * everything ... it will get remapped correctly later */
 	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
-	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
 	load32		PA(pg0),%r1
 
 $pgt_fill_loop:
@@ -42,9 +42,9 @@
  * We are not doing SEGREL32 handling correctly. According to the ABI, we
  * should do a value offset, like this:
  *			if (in_init(me, (void *)val))
- *				val -= (uint32_t)me->module_init;
+ *				val -= (uint32_t)me->init_layout.base;
  *			else
- *				val -= (uint32_t)me->module_core;
+ *				val -= (uint32_t)me->core_layout.base;
  * However, SEGREL32 is used only for PARISC unwind entries, and we want
  * those entries to have an absolute address, and not just an offset.
  *
@@ -100,14 +100,14 @@
  * or init pieces the location is */
 static inline int in_init(struct module *me, void *loc)
 {
-	return (loc >= me->module_init &&
-		loc <= (me->module_init + me->init_size));
+	return (loc >= me->init_layout.base &&
+		loc <= (me->init_layout.base + me->init_layout.size));
 }
 
 static inline int in_core(struct module *me, void *loc)
 {
-	return (loc >= me->module_core &&
-		loc <= (me->module_core + me->core_size));
+	return (loc >= me->core_layout.base &&
+		loc <= (me->core_layout.base + me->core_layout.size));
 }
 
 static inline int in_local(struct module *me, void *loc)
@@ -367,13 +367,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
 	}
 
 	/* align things a bit */
-	me->core_size = ALIGN(me->core_size, 16);
-	me->arch.got_offset = me->core_size;
-	me->core_size += gots * sizeof(struct got_entry);
-
-	me->core_size = ALIGN(me->core_size, 16);
-	me->arch.fdesc_offset = me->core_size;
-	me->core_size += fdescs * sizeof(Elf_Fdesc);
+	me->core_layout.size = ALIGN(me->core_layout.size, 16);
+	me->arch.got_offset = me->core_layout.size;
+	me->core_layout.size += gots * sizeof(struct got_entry);
+
+	me->core_layout.size = ALIGN(me->core_layout.size, 16);
+	me->arch.fdesc_offset = me->core_layout.size;
+	me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
 
 	me->arch.got_max = gots;
 	me->arch.fdesc_max = fdescs;
@@ -391,7 +391,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
 
 	BUG_ON(value == 0);
 
-	got = me->module_core + me->arch.got_offset;
+	got = me->core_layout.base + me->arch.got_offset;
 	for (i = 0; got[i].addr; i++)
 		if (got[i].addr == value)
 			goto out;
@@ -409,7 +409,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
 #ifdef CONFIG_64BIT
 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
 {
-	Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
+	Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
 
 	if (!value) {
 		printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
@@ -427,7 +427,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
 
 	/* Create new one */
 	fdesc->addr = value;
-	fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+	fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
 	return (Elf_Addr)fdesc;
 }
 #endif /* CONFIG_64BIT */
@@ -839,7 +839,7 @@ register_unwind_table(struct module *me,
 
 	table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
 	end = table + sechdrs[me->arch.unwind_section].sh_size;
-	gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+	gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
 
 	DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
 	       me->arch.unwind_section, table, end, gp);
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "The 32-bit Kernel has started...\n");
 #endif
 
-	printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
+	printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
+		(int)(PAGE_SIZE / 1024));
+#ifdef CONFIG_HUGETLB_PAGE
+	printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
+		 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
+#else
+	printk(KERN_CONT "disabled");
+#endif
+	printk(KERN_CONT ".\n");
+
 
 	pdc_console_init();
 
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
 void start_parisc(void)
 {
 	extern void start_kernel(void);
+	extern void early_trap_init(void);
 
 	int ret, cpunum;
 	struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
 		panic("must have an fpu to boot linux");
 	}
 
+	early_trap_init(); /* initialize checksum of fault_vector */
+
 	start_kernel();
 	// not reached
 }
@@ -369,7 +369,7 @@ tracesys_exit:
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldo TASK_REGS(%r1),%r26
bl do_syscall_trace_exit,%r2
BL do_syscall_trace_exit,%r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1

@@ -390,7 +390,7 @@ tracesys_sigexit:
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
bl do_syscall_trace_exit,%r2
BL do_syscall_trace_exit,%r2
ldo TASK_REGS(%r1),%r26

ldil L%syscall_exit_rfi,%r1

@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}

int __init check_ivt(void *iva)
void __init initialize_ivt(const void *iva)
{
extern u32 os_hpmc_size;
extern const u32 os_hpmc[];

@@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
u32 *hpmcp;
u32 length;

if (strcmp((char *)iva, "cows can fly"))
return -1;
if (strcmp((const char *)iva, "cows can fly"))
panic("IVT invalid");

ivap = (u32 *)iva;

@@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
check += ivap[i];

ivap[5] = -check;

return 0;
}

/* early_trap_init() is called before we set up kernel mappings and
* write-protect the kernel */
void __init early_trap_init(void)
{
extern const void fault_vector_20;

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
extern const void fault_vector_11;
initialize_ivt(&fault_vector_11);
#endif
extern const void fault_vector_20;

initialize_ivt(&fault_vector_20);
}

void __init trap_init(void)
{
void *iva;

if (boot_cpu_data.cpu_type >= pcxu)
iva = (void *) &fault_vector_20;
else
#ifdef CONFIG_64BIT
panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
iva = (void *) &fault_vector_11;
#endif

if (check_ivt(iva))
panic("IVT invalid");
}

@@ -60,7 +60,7 @@ SECTIONS
EXIT_DATA
}
PERCPU_SECTION(8)
. = ALIGN(PAGE_SIZE);
. = ALIGN(HUGEPAGE_SIZE);
__init_end = .;
/* freed after init ends here */

@@ -116,7 +116,7 @@ SECTIONS
* that we can properly leave these
* as writable
*/
. = ALIGN(PAGE_SIZE);
. = ALIGN(HUGEPAGE_SIZE);
data_start = .;

EXCEPTION_TABLE(8)

@@ -135,8 +135,11 @@ SECTIONS
_edata = .;

/* BSS */
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)

/* bootmap is allocated in setup_bootmem() directly behind bss. */

. = ALIGN(HUGEPAGE_SIZE);
_end = . ;

STABS_DEBUG

@@ -3,3 +3,4 @@
#

obj-y := init.o fault.o ioremap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

@@ -0,0 +1,161 @@
/*
* PARISC64 Huge TLB page support.
*
* This parisc implementation is heavily based on the SPARC and x86 code.
*
* Copyright (C) 2015 Helge Deller <deller@gmx.de>
*/

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);

if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > TASK_SIZE)
return -ENOMEM;

if (flags & MAP_FIXED)
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;

if (addr)
addr = ALIGN(addr, huge_page_size(h));

/* we need to make sure the colouring is OK */
return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;

/* We must align the address, because our caller will run
* set_huge_pte_at() on whatever we return, which writes out
* all of the sub-ptes for the hugepage range. So we have
* to give it the first such sub-pte.
*/
addr &= HPAGE_MASK;

pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (pud) {
pmd = pmd_alloc(mm, pud, addr);
if (pmd)
pte = pte_alloc_map(mm, NULL, pmd, addr);
}
return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;

addr &= HPAGE_MASK;

pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
pte = pte_offset_map(pmd, addr);
}
}
return pte;
}

/* Purge data and instruction TLB entries. Must be called holding
* the pa_tlb_lock. The TLB purge instructions are slow on SMP
* machines since the purge must be broadcast to all CPUs.
*/
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
int i;

/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
* Linux standard huge pages (e.g. 2 MB) */
BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

addr &= HPAGE_MASK;
addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
mtsp(mm->context, 1);
pdtlb(addr);
if (unlikely(split_tlb))
pitlb(addr);
addr += (1UL << REAL_HPAGE_SHIFT);
}
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned long addr_start;
int i;

addr &= HPAGE_MASK;
addr_start = addr;

for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
/* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
* instead, but then we get double locking on pa_tlb_lock. */
*ptep = entry;
ptep++;

/* Drop the PAGE_SIZE/non-huge tlb entry */
purge_tlb_entries(mm, addr);

addr += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
}

purge_tlb_entries_huge(mm, addr_start);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t entry;

entry = *ptep;
set_huge_pte_at(mm, addr, ptep, __pte(0));

return entry;
}

int pmd_huge(pmd_t pmd)
{
return 0;
}

int pud_huge(pud_t pud)
{
return 0;
}

@@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
unsigned long vaddr;
unsigned long ro_start;
unsigned long ro_end;
unsigned long fv_addr;
unsigned long gw_addr;
extern const unsigned long fault_vector_20;
extern void * const linux_gateway_page;
unsigned long kernel_end;

ro_start = __pa((unsigned long)_text);
ro_end = __pa((unsigned long)&data_start);
fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
kernel_end = __pa((unsigned long)&_end);

end_paddr = start_paddr + size;

@@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr,
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
pte_t pte;

/*
* Map the fault vector writable so we can
* write the HPMC checksum.
*/
if (force)
pte = __mk_pte(address, pgprot);
else if (parisc_text_address(vaddr) &&
address != fv_addr)
else if (parisc_text_address(vaddr)) {
pte = __mk_pte(address, PAGE_KERNEL_EXEC);
if (address >= ro_start && address < kernel_end)
pte = pte_mkhuge(pte);
}
else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
if (address >= ro_start && address < ro_end
&& address != fv_addr
&& address != gw_addr)
pte = __mk_pte(address, PAGE_KERNEL_RO);
else
if (address >= ro_start && address < ro_end) {
pte = __mk_pte(address, PAGE_KERNEL_EXEC);
pte = pte_mkhuge(pte);
} else
#endif
{
pte = __mk_pte(address, pgprot);
if (address >= ro_start && address < kernel_end)
pte = pte_mkhuge(pte);
}

if (address >= end_paddr) {
if (force)

@@ -536,15 +533,12 @@ void free_initmem(void)

/* force the kernel to see the new TLB entries */
__flush_tlb_range(0, init_begin, init_end);
/* Attempt to catch anyone trying to execute code here
* by filling the page with BRK insns.
*/
memset((void *)init_begin, 0x00, init_end - init_begin);

/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);

free_initmem_default(-1);
free_initmem_default(POISON_FREE_INITMEM);

/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

@@ -728,8 +722,8 @@ static void __init pagetable_init(void)
unsigned long size;

start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
size = pmem_ranges[range].pages << PAGE_SHIFT;
end_paddr = start_paddr + size;

map_pages((unsigned long)__va(start_paddr), start_paddr,
size, PAGE_KERNEL, 0);

@@ -382,3 +382,4 @@ COMPAT_SYS(shmat)
SYSCALL(shmdt)
SYSCALL(shmget)
COMPAT_SYS(shmctl)
SYSCALL(mlock2)

@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>

#define __NR_syscalls 378
#define __NR_syscalls 379

#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls

@@ -400,5 +400,6 @@
#define __NR_shmdt 375
#define __NR_shmget 376
#define __NR_shmctl 377
#define __NR_mlock2 378

#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

@@ -188,8 +188,8 @@ static uint32_t do_plt_call(void *location,

pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
/* Init, or core PLT? */
if (location >= mod->module_core
&& location < mod->module_core + mod->core_size)
if (location >= mod->core_layout.base
&& location < mod->core_layout.base + mod->core_layout.size)
entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
else
entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;

@@ -296,7 +296,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
#ifdef CONFIG_DYNAMIC_FTRACE
module->arch.tramp =
do_plt_call(module->module_core,
do_plt_call(module->core_layout.base,
(unsigned long)ftrace_caller,
sechdrs, module);
#endif

@@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,

/* Increase core size by size of got & plt and set start
offsets for got and plt. */
me->core_size = ALIGN(me->core_size, 4);
me->arch.got_offset = me->core_size;
me->core_size += me->arch.got_size;
me->arch.plt_offset = me->core_size;
me->core_size += me->arch.plt_size;
me->core_layout.size = ALIGN(me->core_layout.size, 4);
me->arch.got_offset = me->core_layout.size;
me->core_layout.size += me->arch.got_size;
me->arch.plt_offset = me->core_layout.size;
me->core_layout.size += me->arch.plt_size;
return 0;
}

@@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
if (info->got_initialized == 0) {
Elf_Addr *gotent;

gotent = me->module_core + me->arch.got_offset +
gotent = me->core_layout.base + me->arch.got_offset +
info->got_offset;
*gotent = val;
info->got_initialized = 1;

@@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
rc = apply_rela_bits(loc, val, 0, 64, 0);
else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT) {
val += (Elf_Addr) me->module_core - loc;
val += (Elf_Addr) me->core_layout.base - loc;
rc = apply_rela_bits(loc, val, 1, 32, 1);
}
break;

@@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) {
unsigned int *ip;
ip = me->module_core + me->arch.plt_offset +
ip = me->core_layout.base + me->arch.plt_offset +
info->plt_offset;
ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
ip[1] = 0x100a0004;

@@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val - loc + 0xffffUL < 0x1ffffeUL) ||
(r_type == R_390_PLT32DBL &&
val - loc + 0xffffffffULL < 0x1fffffffeULL)))
val = (Elf_Addr) me->module_core +
val = (Elf_Addr) me->core_layout.base +
me->arch.plt_offset +
info->plt_offset;
val += rela->r_addend - loc;

@@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
case R_390_GOTOFF64: /* 64 bit offset to GOT. */
val = val + rela->r_addend -
((Elf_Addr) me->module_core + me->arch.got_offset);
((Elf_Addr) me->core_layout.base + me->arch.got_offset);
if (r_type == R_390_GOTOFF16)
rc = apply_rela_bits(loc, val, 0, 16, 0);
else if (r_type == R_390_GOTOFF32)

@@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
val = (Elf_Addr) me->module_core + me->arch.got_offset +
val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
rela->r_addend - loc;
if (r_type == R_390_GOTPC)
rc = apply_rela_bits(loc, val, 1, 32, 0);

@@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
src_id, 0);

/* sending vcpu invalid */
if (src_id >= KVM_MAX_VCPUS ||
kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
return -EINVAL;

if (sclp.has_sigpif)

@@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
irq->u.emerg.code, 0);

/* sending vcpu invalid */
if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
return -EINVAL;

set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
atomic_or(CPUSTAT_EXT_INT, li->cpuflags);

@@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
r = 0;
break;
case KVM_CAP_S390_VECTOR_REGISTERS:
if (MACHINE_HAS_VX) {
mutex_lock(&kvm->lock);
if (atomic_read(&kvm->online_vcpus)) {
r = -EBUSY;
} else if (MACHINE_HAS_VX) {
set_kvm_facility(kvm->arch.model.fac->mask, 129);
set_kvm_facility(kvm->arch.model.fac->list, 129);
r = 0;
} else
r = -EINVAL;
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
r ? "(not available)" : "(success)");
break;

@@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)

kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

if (!MACHINE_HAS_PFMF)
if (!test_kvm_facility(vcpu->kvm, 8))
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)

@@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
u16 cpu_addr, u32 parameter, u64 *status_reg)
{
int rc;
struct kvm_vcpu *dst_vcpu;
struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

if (cpu_addr >= KVM_MAX_VCPUS)
return SIGP_CC_NOT_OPERATIONAL;

dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;

@@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

if (order_code == SIGP_EXTERNAL_CALL) {
dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
BUG_ON(dest_vcpu == NULL);

kvm_s390_vcpu_wakeup(dest_vcpu);

@@ -35,7 +35,7 @@
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
#define MSR_NHM_PLATFORM_INFO 0x000000ce
#define MSR_PLATFORM_INFO 0x000000ce

#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)

@@ -44,7 +44,6 @@
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)

#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e

@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
unsigned long eflags;
unsigned long eflags = native_save_fl();

/* This should have been cleared long ago */
raw_local_save_flags(eflags);
BUG_ON(eflags & X86_EFLAGS_AC);

if (cpu_has(c, X86_FEATURE_SMAP)) {

@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
*/
void fpu__init_prepare_fx_sw_frame(void)
{
int fsave_header_size = sizeof(struct fregs_state);
int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;

if (config_enabled(CONFIG_X86_32))
size += fsave_header_size;

fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
fx_sw_reserved.extended_size = size;
fx_sw_reserved.xfeatures = xfeatures_mask;
fx_sw_reserved.xstate_size = xstate_size;

if (config_enabled(CONFIG_IA32_EMULATION)) {
if (config_enabled(CONFIG_IA32_EMULATION) ||
config_enabled(CONFIG_X86_32)) {
int fsave_header_size = sizeof(struct fregs_state);

fx_sw_reserved_ia32 = fx_sw_reserved;
fx_sw_reserved_ia32.extended_size += fsave_header_size;
fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
}
}

@@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return NULL;

xsave = &current->thread.fpu.state.xsave;
/*
* We should not ever be requesting features that we
* have not enabled. Remember that pcntxt_mask is

@@ -41,8 +41,8 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
int ret, numpages, size = 4;
bool readonly;
unsigned long val;
unsigned long core = (unsigned long)mod->module_core;
unsigned long core_size = mod->core_size;
unsigned long core = (unsigned long)mod->core_layout.base;
unsigned long core_size = mod->core_layout.size;

switch (type) {
case R_X86_64_NONE:

@@ -72,7 +72,7 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
readonly = false;

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
if (loc < core + mod->core_ro_size)
if (loc < core + mod->core_layout.ro_size)
readonly = true;
#endif

@@ -278,6 +278,12 @@ trace:
/* save_mcount_regs fills in first two parameters */
save_mcount_regs

/*
* When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
* set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
* ip and parent ip are used and the list function is called when
* function tracing is enabled.
*/
call *ftrace_trace_function

restore_mcount_regs

@@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)

switch (type) {
case VMX_VPID_EXTENT_ALL_CONTEXT:
if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
return 1;
}
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
nested_vmx_succeed(vcpu);
break;

@@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
return 0;
}

static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
return (!lapic_in_kernel(vcpu) ||
kvm_apic_accept_pic_intr(vcpu));
}

/*
* if userspace requested an interrupt window, check that the
* interrupt window is open.
*
* No need to exit to userspace if we already have an interrupt queued.
*/
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
return kvm_arch_interrupt_allowed(vcpu) &&
!kvm_cpu_has_interrupt(vcpu) &&
!kvm_event_needs_reinjection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{

@@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
return -EEXIST;

vcpu->arch.pending_external_vector = irq->irq;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}

@@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}

/*
* Check if userspace requested an interrupt window, and that the
* interrupt window is open.
*
* No need to exit to userspace if we already have an interrupt queued.
*/
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
return false;

if (kvm_cpu_has_interrupt(vcpu))
return false;

return (irqchip_split(vcpu->kvm)
? kvm_apic_accept_pic_intr(vcpu)
: kvm_arch_interrupt_allowed(vcpu));
return vcpu->run->request_interrupt_window &&
likely(!pic_in_kernel(vcpu->kvm));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu)

@@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (!irqchip_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection =
kvm_arch_interrupt_allowed(vcpu) &&
!kvm_cpu_has_interrupt(vcpu) &&
!kvm_event_needs_reinjection(vcpu);
else if (!pic_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection =
kvm_apic_accept_pic_intr(vcpu) &&
!kvm_cpu_has_interrupt(vcpu);
else
kvm_run->ready_for_interrupt_injection = 1;
kvm_run->ready_for_interrupt_injection =
pic_in_kernel(vcpu->kvm) ||
kvm_vcpu_ready_for_interrupt_injection(vcpu);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)

@@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
bool req_int_win = !lapic_in_kernel(vcpu) &&
vcpu->run->request_interrupt_window;
bool req_int_win =
dm_request_for_irq_injection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);

bool req_immediate_exit = false;

if (vcpu->requests) {

@@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);

if (dm_request_for_irq_injection(vcpu)) {
if (dm_request_for_irq_injection(vcpu) &&
kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
r = 0;
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
++vcpu->stat.request_irq_exits;

@@ -585,6 +585,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
return bt_addr;
}

/*
* We only want to do a 4-byte get_user() on 32-bit. Otherwise,
* we might run off the end of the bounds table if we are on
* a 64-bit kernel and try to get 8 bytes.
*/
int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
long __user *bd_entry_ptr)
{
u32 bd_entry_32;
int ret;

if (is_64bit_mm(mm))
return get_user(*bd_entry_ret, bd_entry_ptr);

/*
* Note that get_user() uses the type of the *pointer* to
* establish the size of the get, not the destination.
*/
ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
*bd_entry_ret = bd_entry_32;
return ret;
}

/*
* Get the base of bounds tables pointed by specific bounds
* directory entry.

@@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm,
int need_write = 0;

pagefault_disable();
ret = get_user(bd_entry, bd_entry_ptr);
ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
pagefault_enable();
if (!ret)
break;

@@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
*/
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
{
unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
if (is_64bit_mm(mm))
return virt_space / MPX_BD_NR_ENTRIES_64;
else
return virt_space / MPX_BD_NR_ENTRIES_32;
unsigned long long virt_space;
unsigned long long GB = (1ULL << 30);

/*
* This covers 32-bit emulation as well as 32-bit kernels
* running on 64-bit harware.
*/
if (!is_64bit_mm(mm))
return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;

/*
* 'x86_virt_bits' returns what the hardware is capable
* of, and returns the full >32-bit adddress space when
* running 32-bit kernels on 64-bit hardware.
*/
virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
return virt_space / MPX_BD_NR_ENTRIES_64;
}

/*

@@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
unsigned seg_size = 0, nsegs = 0, sectors = 0;
unsigned front_seg_size = bio->bi_seg_front_size;
bool do_split = true;
struct bio *new = NULL;

bio_for_each_segment(bv, bio, iter) {
if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))

@@ -98,7 +101,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,

seg_size += bv.bv_len;
bvprv = bv;
bvprvp = &bv;
bvprvp = &bvprv;
sectors += bv.bv_len >> 9;
continue;
}

@@ -108,16 +111,29 @@ new_segment:

nsegs++;
bvprv = bv;
bvprvp = &bv;
bvprvp = &bvprv;
seg_size = bv.bv_len;
sectors += bv.bv_len >> 9;

if (nsegs == 1 && seg_size > front_seg_size)
front_seg_size = seg_size;
}

*segs = nsegs;
return NULL;
do_split = false;
split:
*segs = nsegs;
return bio_split(bio, sectors, GFP_NOIO, bs);

if (do_split) {
new = bio_split(bio, sectors, GFP_NOIO, bs);
if (new)
bio = new;
}

bio->bi_seg_front_size = front_seg_size;
if (seg_size > bio->bi_seg_back_size)
bio->bi_seg_back_size = seg_size;

return do_split ? new : NULL;
}

void blk_queue_split(struct request_queue *q, struct bio **bio,

@@ -412,6 +428,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
if (sg)
sg_mark_end(sg);

/*
* Something must have been wrong if the figured number of
* segment is bigger than number of req's physical segments
*/
WARN_ON(nsegs > rq->nr_phys_segments);

return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

@@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_bio_to_request(rq, bio);

/*
* we do limited pluging. If bio can be merged, do merge.
* We do limited pluging. If the bio can be merged, do that.
* Otherwise the existing request in the plug list will be
* issued. So the plug list will have one request at most
*/
if (plug) {
/*
* The plug list might get flushed before this. If that
* happens, same_queue_rq is invalid and plug list is empty
**/
* happens, same_queue_rq is invalid and plug list is
* empty
*/
if (same_queue_rq && !list_empty(&plug->mq_list)) {
old_rq = same_queue_rq;
list_del_init(&old_rq->queuelist);

@@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_bio_to_request(rq, bio);
if (!request_count)
trace_block_plug(q);
else if (request_count >= BLK_MAX_REQUEST_COUNT) {

blk_mq_put_ctx(data.ctx);

if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_flush_plug_list(plug, false);
trace_block_plug(q);
}

list_add_tail(&rq->queuelist, &plug->mq_list);
blk_mq_put_ctx(data.ctx);
return cookie;
}

@@ -158,11 +158,13 @@ void blk_abort_request(struct request *req)
{
if (blk_mark_rq_complete(req))
return;
blk_delete_timer(req);
if (req->q->mq_ops)

if (req->q->mq_ops) {
blk_mq_rq_timed_out(req, false);
else
} else {
blk_delete_timer(req);
blk_rq_timed_out(req);
}
}
EXPORT_SYMBOL_GPL(blk_abort_request);

@@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
int blk_queue_enter(struct request_queue *q, gfp_t gfp);
void blk_queue_exit(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)

@@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq,
static int noop_dispatch(struct request_queue *q, int force)
{
struct noop_data *nd = q->elevator->elevator_data;
struct request *rq;

if (!list_empty(&nd->queue)) {
struct request *rq;
rq = list_entry(nd->queue.next, struct request, queuelist);
rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
if (rq) {
list_del_init(&rq->queuelist);
elv_dispatch_sort(q, rq);
return 1;

@@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq)

if (rq->queuelist.prev == &nd->queue)
return NULL;
return list_entry(rq->queuelist.prev, struct request, queuelist);
return list_prev_entry(rq, queuelist);
}

static struct request *

@@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq)

if (rq->queuelist.next == &nd->queue)
return NULL;
return list_entry(rq->queuelist.next, struct request, queuelist);
return list_next_entry(rq, queuelist);
}

static int noop_init_queue(struct request_queue *q, struct elevator_type *e)

@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
Sector sect;
unsigned char *data;
int slot, blocks_in_map;
unsigned secsize;
unsigned secsize, datasize, partoffset;
#ifdef CONFIG_PPC_PMAC
int found_root = 0;
int found_root_goodness = 0;

@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
}
secsize = be16_to_cpu(md->block_size);
put_dev_sector(sect);
data = read_part_sector(state, secsize/512, &sect);
datasize = round_down(secsize, 512);
data = read_part_sector(state, datasize / 512, &sect);
if (!data)
return -1;
part = (struct mac_partition *) (data + secsize%512);
partoffset = secsize % 512;
if (partoffset + sizeof(*part) > datasize)
return -1;
part = (struct mac_partition *) (data + partoffset);
if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
put_dev_sector(sect);
return 0; /* not a MacOS disk */

@@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/
obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/

obj-$(CONFIG_PARPORT) += parport/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/

@@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/

@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_subspace_idx)
{
struct acpi_pcct_subspace *cppc_ss;
struct acpi_pcct_hw_reduced *cppc_ss;
unsigned int len;

if (pcc_subspace_idx >= 0) {

@@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
}

err_exit:
if (result && q)
if (result)
acpi_ec_delete_query(q);
if (data)
*data = value;

@@ -14,7 +14,6 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>
#include "sbshc.h"

#define PREFIX "ACPI: "

@@ -30,6 +29,7 @@ struct acpi_smb_hc {
u8 query_bit;
smbus_alarm_callback callback;
void *context;
bool done;
};

static int acpi_smbus_hc_add(struct acpi_device *device);

@@ -88,8 +88,6 @@ enum acpi_smb_offset {
ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
};

static bool macbook;

static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
{
return ec_read(hc->offset + address, data);

@@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
return ec_write(hc->offset + address, data);
}

static inline int smb_check_done(struct acpi_smb_hc *hc)
{
union acpi_smb_status status = {.raw = 0};
smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
return status.fields.done && (status.fields.status == SMBUS_OK);
}

static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
{
if (wait_event_timeout(hc->wait, smb_check_done(hc),
msecs_to_jiffies(timeout)))
if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
return 0;
/*
* After the timeout happens, OS will try to check the status of SMbus.
* If the status is what OS expected, it will be regarded as the bogus
* timeout.
*/
if (smb_check_done(hc))
return 0;
else
return -ETIME;
return -ETIME;
}

static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,

@@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
}

mutex_lock(&hc->lock);
if (macbook)
udelay(5);
hc->done = false;
if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
goto end;
if (temp) {

@@ -235,8 +216,10 @@ static int smbus_alarm(void *context)
if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
return 0;
/* Check if it is only a completion notify */
if (status.fields.done)
if (status.fields.done && status.fields.status == SMBUS_OK) {
hc->done = true;
wake_up(&hc->wait);
}
if (!status.fields.alarm)
return 0;
mutex_lock(&hc->lock);

@@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);

static int macbook_dmi_match(const struct dmi_system_id *d)
{
pr_debug("Detected MacBook, enabling workaround\n");
macbook = true;
return 0;
}

static struct dmi_system_id acpi_smbus_dmi_table[] = {
{ macbook_dmi_match, "Apple MacBook", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
},
{ },
};

static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
unsigned long long val;
struct acpi_smb_hc *hc;

dmi_check_system(acpi_smbus_dmi_table);

if (!device)
return -EINVAL;

@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
struct wake_irq *wirq;
int err;

if (irq < 0)
return -EINVAL;

wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;

@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
struct wake_irq *wirq;
int err;

if (irq < 0)
return -EINVAL;

wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;

@@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd)
sector_t capacity;
unsigned int index = 0;
struct kobject *kobj;
unsigned char thd_name[16];

if (dd->disk)
goto skip_create_disk; /* hw init done, before rebuild */

@@ -3958,10 +3957,9 @@ skip_create_disk:
}

start_service_thread:
sprintf(thd_name, "mtip_svc_thd_%02d", index);
dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
dd, dd->numa_node, "%s",
thd_name);
dd, dd->numa_node,
"mtip_svc_thd_%02d", index);

if (IS_ERR(dd->mtip_svc_handler)) {
dev_err(&dd->pdev->dev, "service thread failed to start\n");

@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
struct list_head list;

@@ -39,12 +40,14 @@ struct nullb {

struct nullb_queue *queues;
unsigned int nr_queues;
char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

struct completion_queue {
struct llist_head list;

@@ -119,6 +122,10 @@ static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)

@@ -427,15 +434,156 @@ static void null_del_dev(struct nullb *nullb)
{
list_del_init(&nullb->list);

del_gendisk(nullb->disk);
if (use_lightnvm)
nvm_unregister(nullb->disk_name);
else
del_gendisk(nullb->disk);
blk_cleanup_queue(nullb->q);
if (queue_mode == NULL_Q_MQ)
blk_mq_free_tag_set(&nullb->tag_set);
put_disk(nullb->disk);
if (!use_lightnvm)
put_disk(nullb->disk);
cleanup_queues(nullb);
kfree(nullb);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
struct nvm_rq *rqd = rq->end_io_data;
struct nvm_dev *dev = rqd->dev;

dev->mt->end_io(rqd, error);

blk_put_request(rq);
}

static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
struct request *rq;
struct bio *bio = rqd->bio;

rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
if (IS_ERR(rq))
return -ENOMEM;

rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->__sector = bio->bi_iter.bi_sector;
rq->ioprio = bio_prio(bio);

if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);

rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;

rq->end_io_data = rqd;

blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

return 0;
}

static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
{
sector_t size = gb * 1024 * 1024 * 1024ULL;
sector_t blksize;
struct nvm_id_group *grp;

id->ver_id = 0x1;
id->vmnt = 0;
id->cgrps = 1;
id->cap = 0x3;
id->dom = 0x1;

id->ppaf.blk_offset = 0;
id->ppaf.blk_len = 16;
id->ppaf.pg_offset = 16;
id->ppaf.pg_len = 16;
id->ppaf.sect_offset = 32;
id->ppaf.sect_len = 8;
id->ppaf.pln_offset = 40;
id->ppaf.pln_len = 8;
id->ppaf.lun_offset = 48;
id->ppaf.lun_len = 8;
id->ppaf.ch_offset = 56;
id->ppaf.ch_len = 8;

do_div(size, bs); /* convert size to pages */
do_div(size, 256); /* concert size to pgs pr blk */
grp = &id->groups[0];
grp->mtype = 0;
grp->fmtype = 0;
grp->num_ch = 1;
grp->num_pg = 256;
blksize = size;
do_div(size, (1 << 16));
grp->num_lun = size + 1;
do_div(blksize, grp->num_lun);
grp->num_blk = blksize;
grp->num_pln = 1;

grp->fpg_sz = bs;
grp->csecs = bs;
grp->trdt = 25000;
grp->trdm = 25000;
grp->tprt = 500000;
grp->tprm = 500000;
grp->tbet = 1500000;
grp->tbem = 1500000;
grp->mpos = 0x010101; /* single plane rwe */
grp->cpar = hw_queue_depth;

return 0;
}

static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
{
mempool_t *virtmem_pool;

virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
if (!virtmem_pool) {
pr_err("null_blk: Unable to create virtual memory pool\n");
return NULL;
}

return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
gfp_t mem_flags, dma_addr_t *dma_handler)
{
return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
dma_addr_t dma_handler)
{
mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
.identity = null_lnvm_id,
.submit_io = null_lnvm_submit_io,

.create_dma_pool = null_lnvm_create_dma_pool,
.destroy_dma_pool = null_lnvm_destroy_dma_pool,
.dev_dma_alloc = null_lnvm_dev_dma_alloc,
.dev_dma_free = null_lnvm_dev_dma_free,

/* Simulate nvme protocol restriction */
.max_phys_sect = 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
return 0;

@@ -575,11 +723,6 @@ static int null_add_dev(void)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

disk = nullb->disk = alloc_disk_node(1, home_node);
if (!disk) {
rv = -ENOMEM;
goto out_cleanup_blk_queue;
}

mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);

@@ -589,6 +732,21 @@ static int null_add_dev(void)
blk_queue_logical_block_size(nullb->q, bs);
blk_queue_physical_block_size(nullb->q, bs);

sprintf(nullb->disk_name, "nullb%d", nullb->index);

if (use_lightnvm) {
rv = nvm_register(nullb->q, nullb->disk_name,
&null_lnvm_dev_ops);
if (rv)
goto out_cleanup_blk_queue;
goto done;
}

disk = nullb->disk = alloc_disk_node(1, home_node);
if (!disk) {
rv = -ENOMEM;
goto out_cleanup_lightnvm;
}
size = gb * 1024 * 1024 * 1024ULL;
set_capacity(disk, size >> 9);

@@ -598,10 +756,15 @@ static int null_add_dev(void)
disk->fops = &null_fops;
disk->private_data = nullb;
disk->queue = nullb->q;
sprintf(disk->disk_name, "nullb%d", nullb->index);
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

add_disk(disk);
done:
return 0;

out_cleanup_lightnvm:
if (use_lightnvm)
nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:

@@ -625,6 +788,18 @@ static int __init null_init(void)
bs = PAGE_SIZE;
}

if (use_lightnvm && bs != 4096) {
pr_warn("null_blk: LightNVM only supports 4k block size\n");
pr_warn("null_blk: defaults block size to 4k\n");
bs = 4096;
}

if (use_lightnvm && queue_mode != NULL_Q_MQ) {
pr_warn("null_blk: LightNVM only supported for blk-mq\n");
pr_warn("null_blk: defaults queue mode to blk-mq\n");
queue_mode = NULL_Q_MQ;
}

if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
if (submit_queues < nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.",

@@ -655,15 +830,27 @@ static int __init null_init(void)
if (null_major < 0)
return null_major;

if (use_lightnvm) {
ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
0, 0, NULL);
if (!ppa_cache) {
pr_err("null_blk: unable to create ppa cache\n");
return -ENOMEM;
}
}

for (i = 0; i < nr_devices; i++) {
if (null_add_dev()) {
unregister_blkdev(null_major, "nullb");
return -EINVAL;
goto err_ppa;
}
}

pr_info("null: module loaded\n");
return 0;
err_ppa:
kmem_cache_destroy(ppa_cache);
return -EINVAL;
}

static void __exit null_exit(void)

@@ -678,6 +865,8 @@ static void __exit null_exit(void)
null_del_dev(nullb);
}
mutex_unlock(&lock);

kmem_cache_destroy(ppa_cache);
}

module_init(null_init);

@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
|
|||
return rv;
|
||||
}
|
||||
|
||||
static void start_check_enables(struct smi_info *smi_info)
|
||||
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
|
||||
{
|
||||
smi_info->last_timeout_jiffies = jiffies;
|
||||
mod_timer(&smi_info->si_timer, new_val);
|
||||
smi_info->timer_running = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Start a new message and (re)start the timer and thread.
|
||||
*/
|
||||
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
|
||||
unsigned int size)
|
||||
{
|
||||
smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
|
||||
|
||||
if (smi_info->thread)
|
||||
wake_up_process(smi_info->thread);
|
||||
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
|
||||
}
|
||||
|
||||
static void start_check_enables(struct smi_info *smi_info, bool start_timer)
|
||||
{
|
||||
unsigned char msg[2];
|
||||
|
||||
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
|
||||
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
|
||||
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
|
||||
if (start_timer)
|
||||
start_new_msg(smi_info, msg, 2);
|
||||
else
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
|
||||
smi_info->si_state = SI_CHECKING_ENABLES;
|
||||
}
|
||||
|
||||
static void start_clear_flags(struct smi_info *smi_info)
|
||||
static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
|
||||
{
|
||||
unsigned char msg[3];
|
||||
|
||||
|
@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
|
|||
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
|
||||
msg[2] = WDT_PRE_TIMEOUT_INT;
|
||||
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
|
||||
if (start_timer)
|
||||
start_new_msg(smi_info, msg, 3);
|
||||
else
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
|
||||
smi_info->si_state = SI_CLEARING_FLAGS;
|
||||
}
|
||||
|
||||
|
@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
|
|||
smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
|
||||
smi_info->curr_msg->data_size = 2;
|
||||
|
||||
smi_info->handlers->start_transaction(
|
||||
smi_info->si_sm,
|
||||
smi_info->curr_msg->data,
|
||||
smi_info->curr_msg->data_size);
|
||||
start_new_msg(smi_info, smi_info->curr_msg->data,
|
||||
smi_info->curr_msg->data_size);
|
||||
smi_info->si_state = SI_GETTING_MESSAGES;
|
||||
}
|
||||
|
||||
|
@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
|
|||
smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
|
||||
smi_info->curr_msg->data_size = 2;
|
||||
|
||||
smi_info->handlers->start_transaction(
|
||||
smi_info->si_sm,
|
||||
smi_info->curr_msg->data,
|
||||
smi_info->curr_msg->data_size);
|
||||
start_new_msg(smi_info, smi_info->curr_msg->data,
|
||||
smi_info->curr_msg->data_size);
|
||||
smi_info->si_state = SI_GETTING_EVENTS;
|
||||
}
|
||||
|
||||
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
|
||||
{
|
||||
smi_info->last_timeout_jiffies = jiffies;
|
||||
mod_timer(&smi_info->si_timer, new_val);
|
||||
smi_info->timer_running = true;
|
||||
}

/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
{
	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		start_check_enables(smi_info);
		start_check_enables(smi_info, start_timer);
		return true;
	}
	return false;
@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		start_check_enables(smi_info);
		start_check_enables(smi_info, true);
		return true;
	}
	return false;
@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		if (!disable_si_irq(smi_info))
		if (!disable_si_irq(smi_info, true))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);
@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		start_clear_flags(smi_info, true);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		if (smi_info->intf)
			ipmi_smi_watchdog_pretimeout(smi_info->intf);
@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		smi_info->handlers->start_transaction(
			smi_info->si_sm, msg, 2);
		start_new_msg(smi_info, msg, 2);
		smi_info->si_state = SI_GETTING_FLAGS;
		goto restart;
	}
@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
		 * disable and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->irq) {
			start_check_enables(smi_info);
			start_check_enables(smi_info, true);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* OK if it fails; the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}
@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = {
	  .data = (void *)(unsigned long) SI_BT },
	{},
};
MODULE_DEVICE_TABLE(of, of_ipmi_match);

static int of_ipmi_probe(struct platform_device *dev)
{
@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev)
	}
	return 0;
}
MODULE_DEVICE_TABLE(of, of_ipmi_match);
#else
#define of_ipmi_match NULL
static int of_ipmi_probe(struct platform_device *dev)
@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi)
	 * Start clearing the flags before we enable interrupts or the
	 * timer to avoid racing with the timer.
	 */
	start_clear_flags(new_smi);
	start_clear_flags(new_smi, false);

	/*
	 * IRQ is defined to be set when non-zero. req_events will
@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
		poll(to_clean);
		schedule_timeout_uninterruptible(1);
	}
	disable_si_irq(to_clean);
	disable_si_irq(to_clean, false);
	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
		poll(to_clean);
		schedule_timeout_uninterruptible(1);

drivers/char/ipmi/ipmi_watchdog.c
@ -153,6 +153,9 @@ static int timeout = 10;
/* The pre-timeout is disabled by default. */
static int pretimeout;

/* Default timeout to set on panic */
static int panic_wdt_timeout = 255;

/* Default action is to reset the board on a timeout. */
static unsigned char action_val = WDOG_TIMEOUT_RESET;

@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");

module_param(panic_wdt_timeout, timeout, 0644);
MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");

module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
		 "reset, none, power_cycle, power_off.");

@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this,
		/* Make sure we do this only once. */
		panic_event_handled = 1;

		timeout = 255;
		timeout = panic_wdt_timeout;
		pretimeout = 0;
		panic_halt_ipmi_set_timeout();
	}
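For context, wdog_panic_handler() above runs off the kernel's panic notifier chain. A minimal, self-contained sketch of that mechanism (names prefixed example_ are illustrative, not the driver's):

    #include <linux/kernel.h>
    #include <linux/notifier.h>

    /* Runs in atomic context during panic(); must not sleep. */
    static int example_panic_handler(struct notifier_block *nb,
                                     unsigned long event, void *unused)
    {
            /* Stretch the watchdog so a panic action such as kdump can finish. */
            example_set_wdt_timeout(255);   /* hypothetical helper */
            return NOTIFY_DONE;
    }

    static struct notifier_block example_panic_block = {
            .notifier_call = example_panic_handler,
    };

    static int __init example_init(void)
    {
            atomic_notifier_chain_register(&panic_notifier_list,
                                           &example_panic_block);
            return 0;
    }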

drivers/clocksource/Kconfig
@ -1,4 +1,5 @@
menu "Clock Source drivers"
	depends on !ARCH_USES_GETTIMEOFFSET

config CLKSRC_OF
	bool

drivers/clocksource/fsl_ftm_timer.c
@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq)
	int err;

	ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
	ftm_writel(~0UL, priv->clkevt_base + FTM_MOD);
	ftm_writel(~0u, priv->clkevt_base + FTM_MOD);

	ftm_reset_counter(priv->clkevt_base);

@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq)
	int err;

	ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
	ftm_writel(~0UL, priv->clksrc_base + FTM_MOD);
	ftm_writel(~0u, priv->clksrc_base + FTM_MOD);

	ftm_reset_counter(priv->clksrc_base);
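The only change in both hunks is `~0UL` becoming `~0u`. FTM_MOD is a 32-bit register, and on 64-bit builds `~0UL` is a 64-bit all-ones constant, so passing it where a 32-bit value is expected draws truncation warnings; `~0u` has exactly the register's width. A stand-alone illustration (user-space C, for demonstration only):

    #include <stdio.h>

    int main(void)
    {
            /* On an LP64 target: unsigned long is 64-bit, unsigned int is 32-bit. */
            printf("~0UL: %zu bytes, value 0x%lx\n", sizeof(~0UL), ~0UL);
            printf("~0u:  %zu bytes, value 0x%x\n", sizeof(~0u), ~0u);
            return 0;
    }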

drivers/cpufreq/Kconfig.arm
@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
config ARM_MT8173_CPUFREQ
	bool "Mediatek MT8173 CPUFreq support"
	depends on ARCH_MEDIATEK && REGULATOR
	depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
	depends on !CPU_THERMAL || THERMAL=y
	select PM_OPP
	help

drivers/cpufreq/Kconfig.x86
@ -5,7 +5,6 @@
config X86_INTEL_PSTATE
	bool "Intel P state control"
	depends on X86
	select ACPI_PROCESSOR if ACPI
	help
	  This driver provides a P state for Intel core processors.
	  The driver implements an internal governor and will become

drivers/cpufreq/intel_pstate.c
@ -34,14 +34,10 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>
#endif

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d
#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
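FRAC_BITS and int_tofp() underpin the 24.8 fixed-point arithmetic used by the limit calculations later in this diff (mul_fp, div_fp and fp_toint are defined nearby in the same file). A worked example of how a percentage limit is applied:

    /* int_tofp(1) == 1 << 8 == 256, so values carry 8 fractional bits.
     *
     * An 87% cap stored as limits->max_perf:
     *     div_fp(int_tofp(87), int_tofp(100)) == (87 << 8) / 100 == 222   (~0.867)
     *
     * Applied to a max P-state ratio of 32:
     *     mul_fp(int_tofp(32), 222) == (8192 * 222) >> 8 == 7104
     *     fp_toint(7104)            == 7104 >> 8          == 27
     *
     * i.e. the clamped ratio is 27, roughly 87% of 32.
     */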
@ -117,9 +113,6 @@ struct cpudata {
	u64	prev_mperf;
	u64	prev_tsc;
	struct sample sample;
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_processor_performance acpi_perf_data;
#endif
};

static struct cpudata **all_cpu_data;
@ -150,7 +143,6 @@ struct cpu_defaults {
static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
static int no_acpi_perf;

struct perf_limits {
	int no_turbo;
@ -163,8 +155,6 @@ struct perf_limits {
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
	int max_perf_ctl;
	int min_perf_ctl;
};

static struct perf_limits performance_limits = {
@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
	.max_perf_ctl = 0,
	.min_perf_ctl = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
static struct perf_limits *limits = &powersave_limits;
#endif

#if IS_ENABLED(CONFIG_ACPI)
/*
 * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
 * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
 * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
 * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
 * target ratio 0x17. The _PSS control value stores in a format which can be
 * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
 * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
 * This function converts the _PSS control value to intel pstate driver format
 * for comparison and assignment.
 */
static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
{
	return cpu->acpi_perf_data.states[index].control >> 8;
}

static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	bool turbo_absent = false;
	int max_pstate_index;
	int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
	int i;

	cpu = all_cpu_data[policy->cpu];

	pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
		 cpu->pstate.turbo_pstate);

	if (!cpu->acpi_perf_data.shared_cpu_map &&
	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
		return -ENOMEM;
	}

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return ret;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
	    ACPI_ADR_SPACE_FIXED_HARDWARE)
		return -EIO;

	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);

	/*
	 * If there is only one entry _PSS, simply ignore _PSS and continue as
	 * usual without taking _PSS into account
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		return 0;

	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
	min_pss_ctl = convert_to_native_pstate_format(cpu,
					cpu->acpi_perf_data.state_count - 1);
	/* Check if there is a turbo freq in _PSS */
	if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
	    turbo_pss_ctl > cpu->pstate.min_pstate) {
		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
		limits->no_turbo = limits->turbo_disabled = 1;
		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
		turbo_absent = true;
	}

	/* Check if the max non turbo p state < Intel P state max */
	max_pstate_index = turbo_absent ? 0 : 1;
	max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
	if (max_pss_ctl < cpu->pstate.max_pstate &&
	    max_pss_ctl > cpu->pstate.min_pstate)
		cpu->pstate.max_pstate = max_pss_ctl;

	/* Check if min perf > Intel P State min */
	if (min_pss_ctl > cpu->pstate.min_pstate &&
	    min_pss_ctl < cpu->pstate.max_pstate) {
		cpu->pstate.min_pstate = min_pss_ctl;
		policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
	}

	if (turbo_absent)
		policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
						cpu->pstate.scaling;
	else {
		policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
						cpu->pstate.scaling;
		/*
		 * The _PSS table doesn't contain the whole turbo frequency
		 * range. It just contains +1 MHz above the max non turbo
		 * frequency, with a control value corresponding to the max
		 * turbo ratio. But when cpufreq set policy is called, it will
		 * call with this max frequency, which will cause reduced
		 * performance, as this driver uses the real max turbo
		 * frequency as the max frequency. So correct this frequency
		 * in the _PSS table to the max turbo frequency based on the
		 * turbo ratio. Also need to convert to MHz as _PSS freq is
		 * in MHz.
		 */
		cpu->acpi_perf_data.states[0].core_frequency =
						turbo_pss_ctl * 100;
	}

	pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
		 cpu->pstate.turbo_pstate);
	pr_debug("intel_pstate: policy max_freq=%d kHz min_freq=%d kHz\n",
		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);

	return 0;
}

static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!no_acpi_perf)
		return 0;

	cpu = all_cpu_data[policy->cpu];
	acpi_processor_unregister_performance(policy->cpu);
	return 0;
}

#else
static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
{
	return 0;
}

static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	return 0;
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral) {
	pid->setpoint = setpoint;
@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int byt_get_min_pstate(void)
static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void byt_set_pstate(struct cpudata *cpudata, int pstate)
static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};

static int byt_get_scaling(void)
static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x3;
	i = value & 0x7;
	WARN_ON(i > 4);

	BUG_ON(i > BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
	return silvermont_freq_table[i];
}

static void byt_get_vid(struct cpudata *cpudata)
static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}
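To make the two lookup tables concrete: the low bits of MSR_FSB_FREQ index a per-core-type bus-clock table in kHz, and that value becomes the driver's scaling factor. A worked decode, assuming the Silvermont table above:

    /* rdmsrl(MSR_FSB_FREQ, value) returns, say, value == 2:
     *     i = value & 0x7 == 2  ->  silvermont_freq_table[2] == 133300 kHz
     * A P-state ratio of 16 then corresponds to
     *     16 * 133300 kHz == 2132800 kHz  (~2.13 GHz)
     */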

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
	},
};

static struct cpu_defaults byt_params = {
static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_max_physical = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
	},
};

@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	if (limits->max_perf_ctl && limits->max_sysfs_pct >=
						limits->max_policy_pct) {
		*max = limits->max_perf_ctl;
	} else {
		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
					limits->max_perf));
		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
			       cpu->pstate.turbo_pstate);
	}
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	if (limits->min_perf_ctl) {
		*min = limits->min_perf_ctl;
	} else {
		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
				    limits->min_perf));
		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
	}
	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct cpudata *cpu;
	int i;
#endif
	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
		 policy->cpuinfo.max_freq, policy->max);
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

@ -1270,23 +1129,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

#if IS_ENABLED(CONFIG_ACPI)
	cpu = all_cpu_data[policy->cpu];
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		int control;

		control = convert_to_native_pstate_format(cpu, i);
		if (control * cpu->pstate.scaling == policy->max)
			limits->max_perf_ctl = control;
		if (control * cpu->pstate.scaling == policy->min)
			limits->min_perf_ctl = control;
	}

	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
		 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
		 limits->max_perf_ctl);
#endif

	if (hwp_active)
		intel_pstate_hwp_set();

@ -1341,30 +1183,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	if (!no_acpi_perf)
		intel_pstate_init_perf_limits(policy);
	/*
	 * If there is no ACPI perf data, or on error, we ignore it and use
	 * the Intel P state calculated limits, so this is not a fatal error.
	 */
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	return intel_pstate_exit_perf_limits(policy);
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = intel_pstate_verify_policy,
	.setpolicy = intel_pstate_set_policy,
	.get = intel_pstate_get,
	.init = intel_pstate_cpu_init,
	.exit = intel_pstate_cpu_exit,
	.stop_cpu = intel_pstate_stop_cpu,
	.name = "intel_pstate",
};
@ -1406,6 +1236,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
@ -1601,9 +1432,6 @@ static int __init intel_pstate_setup(char *str)
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "no_acpi"))
		no_acpi_perf = 1;

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

drivers/dma/at_hdmac.c
@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
		return NULL;

	dev_info(chan2dev(chan),
		 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
		__func__, xt->src_start, xt->dst_start, xt->numf,
		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	/*
@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	u32 ctrla;
	u32 ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
		 dest, src, len, flags);
	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
		 &dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
	void __iomem *vaddr;
	dma_addr_t paddr;

	dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
		 dest, value, len, flags);
	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		 &dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
		dma_addr_t dest = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n",
			 __func__, dest, len);
		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
			 __func__, &dest, len);

		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 buf_addr,
		 &buf_addr,
		 periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
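Each hunk in this file makes the same substitution: dma_addr_t may be 32 or 64 bits depending on the configuration, so printing it with %x is wrong on some builds. The kernel's %pad printk specifier prints a dma_addr_t at its native width and, like the other %p extensions, takes a pointer to the value, which is why every argument gains an &. The same pattern repeats in at_hdmac_regs.h and at_xdmac.c below. In general:

    /* Printing a dma_addr_t portably: pass a pointer, let %pad pick the width. */
    dma_addr_t addr = dma_handle;   /* some previously mapped address */
    dev_dbg(dev, "mapped at %pad\n", &addr);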

drivers/dma/at_hdmac_regs.h
@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
	dev_crit(chan2dev(&atchan->chan_common),
		 "  desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
		 lli->saddr, lli->daddr,
		 lli->ctrla, lli->ctrlb, lli->dscr);
		 "  desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n",
		 &lli->saddr, &lli->daddr,
		 lli->ctrla, lli->ctrlb, &lli->dscr);
}


drivers/dma/at_xdmac.c
@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, desc->lld.mbr_sa, desc->lld.mbr_da,
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
	if ((xt->numf > 1) && (xt->frame_size > 1))
		return NULL;

	dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
		__func__, xt->src_start, xt->dst_start, xt->numf,
	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
		"%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
		desc->lld.mbr_cfg);

	return desc;
@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc;

	dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
		__func__, dest, len, value, flags);
	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
		__func__, &dest, len, value, flags);

	if (unlikely(!len))
		return NULL;
@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
			__func__, sg_dma_address(sg), sg_dma_len(sg),
		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
			__func__, &sg_dma_address(sg), sg_dma_len(sg),
			value, flags);
		desc = at_xdmac_memset_create_desc(chan, atchan,
						   sg_dma_address(sg),

drivers/dma/edma.c
@ -107,7 +107,7 @@

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)	(x & 0x70 >> 4) /* bits 4-6 */
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
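The GET_NUM_QDMACH change is a pure operator-precedence fix: in C, `>>` binds tighter than `&`, so the old body parsed as `x & (0x70 >> 4)`, i.e. `x & 0x7`, which reads bits 0-2 instead of bits 4-6. A stand-alone check:

    #include <stdio.h>

    int main(void)
    {
            unsigned int x = 0x20;  /* value 2 encoded in bits 4-6 */

            printf("broken: 0x%x\n", x & 0x70 >> 4);   /* x & 0x7  -> 0x0 */
            printf("fixed:  0x%x\n", (x & 0x70) >> 4); /* bits 4-6 -> 0x2 */
            return 0;
    }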
@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
	struct platform_device *tc_pdev;
	int ret;

	if (!tc)
	if (!IS_ENABLED(CONFIG_OF) || !tc)
		return;

	tc_pdev = of_find_device_by_node(tc->node);

drivers/dma/imx-sdma.c
@ -1462,7 +1462,7 @@ err_firmware:

#define EVENT_REMAP_CELLS	3

static int __init sdma_event_remap(struct sdma_engine *sdma)
static int sdma_event_remap(struct sdma_engine *sdma)
{
	struct device_node *np = sdma->dev->of_node;
	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
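The hunk above drops the __init annotation from sdma_event_remap(). __init code is discarded once boot-time initialization finishes, so a function that can still be reached afterwards (for example via deferred probe) must not live in that section. A generic illustration of the hazard:

    static int __init boot_only_helper(void)   /* placed in .init.text, freed after boot */
    {
            return 0;
    }

    int runtime_caller(void)                   /* ordinary .text, may run any time */
    {
            /* Calling into freed init memory; modpost flags such references
             * as a section mismatch at build time. */
            return boot_only_helper();
    }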
Some files were not shown because too many files have changed in this diff.