// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
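
/*
 * Allocate module memory from the module region. module_alloc_base is set
 * up at boot (and randomized when KASLR is in effect), and MODULES_VSIZE is
 * the default 128 MB window. If that window is exhausted and module PLTs
 * are available, we retry within 2 GB of module_alloc_base so that ADRP
 * references between the module and the kernel remain in range, with
 * out-of-range branches bounced through PLT entries.
 */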
void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
					 module_alloc_base + SZ_2G, GFP_KERNEL,
					 PAGE_KERNEL, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
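
/*
 * How a relocation value is computed, in AArch64 ELF psABI terms: ABS
 * yields S + A, PREL yields S + A - P, and PAGE yields
 * Page(S + A) - Page(P), where Page() masks off the low 12 bits.
 */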
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */
	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
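
/*
 * MOVW relocations come in two flavours: MOVKZ leaves the opcode bits of
 * the instruction alone and only patches the 16-bit immediate, while MOVNZ
 * may rewrite the instruction as MOVZ or MOVN depending on the sign of the
 * value, as required by the signed MOVW relocation groups.
 */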
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
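
/*
 * Patch a 'len'-bit immediate field, taken from bit 'lsb' of the relocation
 * value, into the instruction at 'place'. A 19-bit branch immediate with
 * lsb == 2, for instance, covers a +/-1 MB range. -ERANGE is returned when
 * the bits dropped above the field are not a plain sign extension of it.
 */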
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
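
/*
 * Handle R_AARCH64_ADR_PREL_PG_HI21[_NC] with the Cortex-A53 erratum
 * #843419 workaround applied: an ADRP that lands at offset 0xff8 or 0xffc
 * within a 4 KB page is either rewritten as an ADR (when the target's page
 * address is within ADR range, i.e. +/-1 MB) or redirected through a
 * veneer that materializes the address with a MOVN/MOVK sequence.
 */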
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}
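
/*
 * Resolve the RELA relocations in one section against the module's final
 * load addresses. Branch relocations (JUMP26/CALL26) have a +/-128 MB
 * reach; if the target is further away and module PLTs are configured,
 * the branch is redirected through a PLT entry instead.
 */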
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
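
/*
 * Late fixups once all sections have been loaded: apply any alternative
 * instruction patches carried in the module's .altinstructions section
 * and, when dynamic ftrace is used together with module PLTs, record the
 * address of the .text.ftrace_trampoline section for the ftrace code.
 */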
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}