/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

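	/*
	 * Try the dedicated module region first; under KASLR,
	 * module_alloc_base may be randomized independently of the
	 * core kernel.
	 */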
	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				gfp_mask, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

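/*
 * The relocation expression kinds from the AArch64 ELF document: ABS
 * computes S + A, PREL computes S + A - P and PAGE computes
 * Page(S + A) - Page(P), where P is the address of the place being
 * relocated (see do_reloc() below).
 */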
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
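		/* Mask off the low 12 bits to get 4 KB page addresses. */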
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

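	/*
	 * The 16- and 32-bit data relocations may legitimately target
	 * either a signed or an unsigned quantity, so accept any value
	 * between the signed minimum and the unsigned maximum for the
	 * given width.
	 */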
	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

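/*
 * MOVKZ: the immediate is inserted as-is (MOVK/MOVZ). MOVNZ: the
 * relocation is signed, so the instruction may be rewritten between
 * MOVZ and MOVN depending on the sign of the value.
 */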
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

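	/* A MOVW immediate is 16 bits; anything wider has overflowed. */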
	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
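	/* After the shift above, sval must therefore be either 0 or -1. */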
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
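		/*
		 * The *_NC (no check) variants clear overflow_check and
		 * then fall through to the corresponding checked case.
		 */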
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#ifndef CONFIG_ARM64_ERRATUM_843419
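		/*
		 * With the erratum 843419 workaround enabled, modules are
		 * built with -mcmodel=large and so should not contain
		 * ADRP-based relocations; any that do appear are rejected
		 * by the default case below.
		 */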
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

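			/*
			 * A branch out of range for a direct B/BL is
			 * redirected through a PLT entry emitted into the
			 * module itself, which is within branch range by
			 * construction.
			 */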
			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, loc, &rel[i], sym);
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

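	/*
	 * Walk the section headers: apply any instruction alternatives
	 * recorded in .altinstructions and, when module PLTs are in use,
	 * note where the ftrace trampoline section was placed.
	 */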
	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
		}
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}