arm64 updates for 5.5:
- ZONE_DMA32 initialisation fix when memblocks fall entirely within the first GB (used by ZONE_DMA in 5.5 for Raspberry Pi 4). - Couple of ftrace fixes following the FTRACE_WITH_REGS patchset. - access_ok() fix for the Tagged Address ABI when called from a kernel thread (asynchronous I/O): the kthread does not have the TIF flags of the mm owner, so untag the user address unconditionally. - KVM compute_layout() called before the alternatives code patching. - Minor clean-ups. -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEE5RElWfyWxS+3PLO2a9axLQDIXvEFAl3qdr8ACgkQa9axLQDI XvEKNA/9FWyqesV612qaFkhTFCg0f6byLP4wQf0ZqcZf74L5J6zeUCIc7Mqgdno3 jWROZNAW69gOYfwHxcBNyPVBpbAcx3k3WBBRWRJPnnJmeQk0oBLVdRziqdGKLxfw GhhWGsndnpxDOmyBJOWxZLemZjKFqYX3dhQ9Zi6pvkZgAeFEtIESw6S/iqf42nWL 1o/LgyQ5kjR6eto1OVW9QOQ83/TlXXlbsvoNwNFGghX1yHjQ6mZ3LITbiFdrbFlT FLLwsqlsPzDQcKagszTEHZTbXBf0RojKXWh3HKSEnmPwpacestvLJcKHTD9mtDmY Z+rLfyiolZmXoNU9LT6uGTzVD4cRWfzz6eHSYHiufM1UztGSV+dOhIqfPuEOLEu3 2Xf8sKmQMtuICgbol6Q6ISrjXKH/UNvK2CuuuJSNmOHDlyHKvNfJtoyEhZ5rHUpT HQy0qxDCEU7rFCP7clOD/94EGA8gYrV8j5NauY8/VsLpRCMBwoLNglI049qJydaZ jL9dPxo+GG7kh7S8VmYwBKtPhqlDNFCzw/HmBBURFhkM1j0nCNt5dKHx+kdLNurg nbzRvJ+W42eDze2lmVf33eOfrAy2MfcGr+VuJ5QdmL30bQENCemPrreIy+VnVVR8 ydeK3lyknJjmX4q8a5o/URsAKvk13crwimNPa0OSoYzDKmWd8SA= =vhnZ -----END PGP SIGNATURE----- Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux Pull arm64 fixes from Catalin Marinas: - ZONE_DMA32 initialisation fix when memblocks fall entirely within the first GB (used by ZONE_DMA in 5.5 for Raspberry Pi 4). - Couple of ftrace fixes following the FTRACE_WITH_REGS patchset. - access_ok() fix for the Tagged Address ABI when called from a kernel thread (asynchronous I/O): the kthread does not have the TIF flags of the mm owner, so untag the user address unconditionally. - KVM compute_layout() called before the alternatives code patching. - Minor clean-ups. 
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: arm64: entry: refine comment of stack overflow check arm64: ftrace: fix ifdeffery arm64: KVM: Invoke compute_layout() before alternatives are applied arm64: Validate tagged addresses in access_ok() called from kernel threads arm64: mm: Fix column alignment for UXN in kernel_page_tables arm64: insn: consistently handle exit text arm64: mm: Fix initialisation of DMA zones on non-NUMA systems
This commit is contained in:
commit
9888428102
|
@ -91,6 +91,7 @@ alternative_cb_end
|
|||
|
||||
void kvm_update_va_mask(struct alt_instr *alt,
|
||||
__le32 *origptr, __le32 *updptr, int nr_inst);
|
||||
void kvm_compute_layout(void);
|
||||
|
||||
static inline unsigned long __kern_hyp_va(unsigned long v)
|
||||
{
|
||||
|
|
|
@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[];
|
|||
extern char __idmap_text_start[], __idmap_text_end[];
|
||||
extern char __initdata_begin[], __initdata_end[];
|
||||
extern char __inittext_begin[], __inittext_end[];
|
||||
extern char __exittext_begin[], __exittext_end[];
|
||||
extern char __irqentry_text_start[], __irqentry_text_end[];
|
||||
extern char __mmuoff_data_start[], __mmuoff_data_end[];
|
||||
extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
|
||||
|
|
|
@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
|
|||
{
|
||||
unsigned long ret, limit = current_thread_info()->addr_limit;
|
||||
|
||||
/*
|
||||
* Asynchronous I/O running in a kernel thread does not have the
|
||||
* TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
|
||||
* the user address before checking.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
|
||||
test_thread_flag(TIF_TAGGED_ADDR))
|
||||
(current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
|
||||
addr = untagged_addr(addr);
|
||||
|
||||
__chk_user_ptr(addr);
|
||||
|
|
|
@ -133,7 +133,6 @@ ENTRY(ftrace_graph_caller)
|
|||
bl prepare_ftrace_return
|
||||
b ftrace_common_return
|
||||
ENDPROC(ftrace_graph_caller)
|
||||
#else
|
||||
#endif
|
||||
|
||||
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
|
||||
|
@ -287,6 +286,7 @@ GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
|
|||
|
||||
mcount_exit
|
||||
ENDPROC(ftrace_caller)
|
||||
#endif /* CONFIG_DYNAMIC_FTRACE */
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
/*
|
||||
|
@ -307,7 +307,6 @@ ENTRY(ftrace_graph_caller)
|
|||
mcount_exit
|
||||
ENDPROC(ftrace_graph_caller)
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
#endif /* CONFIG_DYNAMIC_FTRACE */
|
||||
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
|
||||
|
||||
ENTRY(ftrace_stub)
|
||||
|
|
|
@ -76,7 +76,8 @@ alternative_else_nop_endif
|
|||
#ifdef CONFIG_VMAP_STACK
|
||||
/*
|
||||
* Test whether the SP has overflowed, without corrupting a GPR.
|
||||
* Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
|
||||
* Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
|
||||
* should always be zero.
|
||||
*/
|
||||
add sp, sp, x0 // sp' = sp + x0
|
||||
sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <asm/fixmap.h>
|
||||
#include <asm/insn.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#define AARCH64_INSN_SF_BIT BIT(31)
|
||||
#define AARCH64_INSN_N_BIT BIT(22)
|
||||
|
@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)
|
|||
|
||||
static DEFINE_RAW_SPINLOCK(patch_lock);
|
||||
|
||||
static bool is_exit_text(unsigned long addr)
|
||||
{
|
||||
/* discarded with init text/data */
|
||||
return system_state < SYSTEM_RUNNING &&
|
||||
addr >= (unsigned long)__exittext_begin &&
|
||||
addr < (unsigned long)__exittext_end;
|
||||
}
|
||||
|
||||
static bool is_image_text(unsigned long addr)
|
||||
{
|
||||
return core_kernel_text(addr) || is_exit_text(addr);
|
||||
}
|
||||
|
||||
static void __kprobes *patch_map(void *addr, int fixmap)
|
||||
{
|
||||
unsigned long uintaddr = (uintptr_t) addr;
|
||||
bool module = !core_kernel_text(uintaddr);
|
||||
bool image = is_image_text(uintaddr);
|
||||
struct page *page;
|
||||
|
||||
if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
|
||||
page = vmalloc_to_page(addr);
|
||||
else if (!module)
|
||||
if (image)
|
||||
page = phys_to_page(__pa_symbol(addr));
|
||||
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
|
||||
page = vmalloc_to_page(addr);
|
||||
else
|
||||
return addr;
|
||||
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include <linux/of.h>
|
||||
#include <linux/irq_work.h>
|
||||
#include <linux/kexec.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/atomic.h>
|
||||
|
@ -39,6 +40,7 @@
|
|||
#include <asm/cputype.h>
|
||||
#include <asm/cpu_ops.h>
|
||||
#include <asm/daifflags.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/numa.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
@ -407,6 +409,8 @@ static void __init hyp_mode_check(void)
|
|||
"CPU: CPUs started in inconsistent modes");
|
||||
else
|
||||
pr_info("CPU: All CPU(s) started at EL1\n");
|
||||
if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
|
||||
kvm_compute_layout();
|
||||
}
|
||||
|
||||
void __init smp_cpus_done(unsigned int max_cpus)
|
||||
|
|
|
@ -158,9 +158,12 @@ SECTIONS
|
|||
__inittext_begin = .;
|
||||
|
||||
INIT_TEXT_SECTION(8)
|
||||
|
||||
__exittext_begin = .;
|
||||
.exit.text : {
|
||||
ARM_EXIT_KEEP(EXIT_TEXT)
|
||||
}
|
||||
__exittext_end = .;
|
||||
|
||||
. = ALIGN(4);
|
||||
.altinstructions : {
|
||||
|
|
|
@ -22,7 +22,7 @@ static u8 tag_lsb;
|
|||
static u64 tag_val;
|
||||
static u64 va_mask;
|
||||
|
||||
static void compute_layout(void)
|
||||
__init void kvm_compute_layout(void)
|
||||
{
|
||||
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
|
||||
u64 hyp_va_msb;
|
||||
|
@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
|
|||
|
||||
BUG_ON(nr_inst != 5);
|
||||
|
||||
if (!has_vhe() && !va_mask)
|
||||
compute_layout();
|
||||
|
||||
for (i = 0; i < nr_inst; i++) {
|
||||
u32 rd, rn, insn, oinsn;
|
||||
|
||||
|
@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
|
|||
return;
|
||||
}
|
||||
|
||||
if (!va_mask)
|
||||
compute_layout();
|
||||
|
||||
/*
|
||||
* Compute HYP VA by using the same computation as kern_hyp_va()
|
||||
*/
|
||||
|
|
|
@ -142,6 +142,7 @@ static const struct prot_bits pte_bits[] = {
|
|||
.mask = PTE_UXN,
|
||||
.val = PTE_UXN,
|
||||
.set = "UXN",
|
||||
.clear = " ",
|
||||
}, {
|
||||
.mask = PTE_ATTRINDX_MASK,
|
||||
.val = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
|
||||
|
|
|
@ -214,15 +214,14 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
|
|||
{
|
||||
struct memblock_region *reg;
|
||||
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
|
||||
unsigned long max_dma32 = min;
|
||||
unsigned long __maybe_unused max_dma = min;
|
||||
unsigned long __maybe_unused max_dma, max_dma32;
|
||||
|
||||
memset(zone_size, 0, sizeof(zone_size));
|
||||
|
||||
max_dma = max_dma32 = min;
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
max_dma = PFN_DOWN(arm64_dma_phys_limit);
|
||||
max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit);
|
||||
zone_size[ZONE_DMA] = max_dma - min;
|
||||
max_dma32 = max_dma;
|
||||
#endif
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
|
||||
|
@ -236,25 +235,23 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
|
|||
unsigned long start = memblock_region_memory_base_pfn(reg);
|
||||
unsigned long end = memblock_region_memory_end_pfn(reg);
|
||||
|
||||
if (start >= max)
|
||||
continue;
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
if (start < max_dma) {
|
||||
unsigned long dma_end = min_not_zero(end, max_dma);
|
||||
if (start >= min && start < max_dma) {
|
||||
unsigned long dma_end = min(end, max_dma);
|
||||
zhole_size[ZONE_DMA] -= dma_end - start;
|
||||
start = dma_end;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
if (start < max_dma32) {
|
||||
if (start >= max_dma && start < max_dma32) {
|
||||
unsigned long dma32_end = min(end, max_dma32);
|
||||
unsigned long dma32_start = max(start, max_dma);
|
||||
zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
|
||||
zhole_size[ZONE_DMA32] -= dma32_end - start;
|
||||
start = dma32_end;
|
||||
}
|
||||
#endif
|
||||
if (end > max_dma32) {
|
||||
if (start >= max_dma32 && start < max) {
|
||||
unsigned long normal_end = min(end, max);
|
||||
unsigned long normal_start = max(start, max_dma32);
|
||||
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
|
||||
zhole_size[ZONE_NORMAL] -= normal_end - start;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue