// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#define pr_fmt(fmt) "efi: memattr: " fmt

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/early_ioremap.h>

static int __initdata tbl_size;
unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
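
/*
 * For reference (a sketch inferred from how the fields are used below; see
 * <linux/efi.h> for the authoritative definition), the configuration table
 * is a small header followed by 'num_entries' descriptors of 'desc_size'
 * bytes each, roughly:
 *
 *	typedef struct {
 *		u32			version;
 *		u32			num_entries;
 *		u32			desc_size;
 *		u32			reserved;
 *		efi_memory_desc_t	entry[0];
 *	} efi_memory_attributes_table_t;
 */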

/*
 * Reserve the memory associated with the Memory Attributes configuration
 * table, if it exists.
 */
int __init efi_memattr_init(void)
{
        efi_memory_attributes_table_t *tbl;

        if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
                return 0;

        tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));
        if (!tbl) {
                pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
                       efi_mem_attr_table);
                return -ENOMEM;
        }

        if (tbl->version > 1) {
                pr_warn("Unexpected EFI Memory Attributes table version %d\n",
                        tbl->version);
                goto unmap;
        }

        tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
        memblock_reserve(efi_mem_attr_table, tbl_size);
        set_bit(EFI_MEM_ATTR, &efi.flags);

unmap:
        early_memunmap(tbl, sizeof(*tbl));
        return 0;
}

/*
 * Returns a copy @out of the UEFI memory descriptor @in if it is covered
 * entirely by a UEFI memory map entry with matching attributes. The virtual
 * address of @out is set according to the matching entry that was found.
 */
static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
{
        u64 in_paddr = in->phys_addr;
        u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
        efi_memory_desc_t *md;

        *out = *in;

        if (in->type != EFI_RUNTIME_SERVICES_CODE &&
            in->type != EFI_RUNTIME_SERVICES_DATA) {
                pr_warn("Entry type should be RuntimeServiceCode/Data\n");
                return false;
        }

        if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
                pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
                return false;
        }

        if (PAGE_SIZE > EFI_PAGE_SIZE &&
            (!PAGE_ALIGNED(in->phys_addr) ||
             !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
                /*
                 * Since arm64 may execute with page sizes of up to 64 KB, the
                 * UEFI spec mandates that RuntimeServices memory regions must
                 * be 64 KB aligned. We need to validate this here since we will
                 * not be able to tighten permissions on such regions without
                 * affecting adjacent regions.
                 */
                pr_warn("Entry address region misaligned\n");
                return false;
        }

        for_each_efi_memory_desc(md) {
                u64 md_paddr = md->phys_addr;
                u64 md_size = md->num_pages << EFI_PAGE_SHIFT;

                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;
                if (md->virt_addr == 0 && md->phys_addr != 0) {
                        /*
                         * No virtual mapping has been installed by the stub.
                         * Note that a runtime region residing at physical
                         * address 0x0 may legitimately have a virtual address
                         * of 0x0 as well (e.g., when running in mixed mode on
                         * x86 with a 1:1 mapping), so only bail out when the
                         * physical address is nonzero.
                         */
                        break;
                }

                if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
                        continue;

                /*
                 * This entry covers the start of @in, check whether
                 * it covers the end as well.
                 */
                if (md_paddr + md_size < in_paddr + in_size) {
                        pr_warn("Entry covers multiple EFI memory map regions\n");
                        return false;
                }

                if (md->type != in->type) {
                        pr_warn("Entry type deviates from EFI memory map region type\n");
                        return false;
                }

                out->virt_addr = in_paddr + (md->virt_addr - md_paddr);

                return true;
        }

        pr_warn("No matching entry found in the EFI memory map\n");
        return false;
}

/*
 * To be called after the EFI page tables have been populated. If a memory
 * attributes table is available, its contents will be used to update the
 * mappings with tightened permissions as described by the table.
 * This requires the UEFI memory map to have already been populated with
 * virtual addresses.
 */
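/*
 * Illustrative usage (a sketch, not part of this file): architecture code
 * is expected to call this after building the EFI runtime page tables,
 * passing a callback that tightens the permissions of one region at a
 * time. The callback below is hypothetical; the helper that actually
 * updates the page tables is arch specific.
 *
 *	static int __init set_permissions(struct mm_struct *mm,
 *					  efi_memory_desc_t *md)
 *	{
 *		... restrict the mapping of md->virt_addr in mm according
 *		    to md->attribute (EFI_MEMORY_RO / EFI_MEMORY_XP) ...
 *		return 0;
 *	}
 *
 *	efi_memattr_apply_permissions(&efi_mm, set_permissions);
 */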
int __init efi_memattr_apply_permissions(struct mm_struct *mm,
                                         efi_memattr_perm_setter fn)
{
        efi_memory_attributes_table_t *tbl;
        int i, ret;

        if (tbl_size <= sizeof(*tbl))
                return 0;

        /*
         * We need the EFI memory map to be set up so we can use it to
         * look up the virtual addresses of all entries in the EFI Memory
         * Attributes table. If it isn't available, this function should
         * not be called.
         */
        if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
                return 0;

        tbl = memremap(efi_mem_attr_table, tbl_size, MEMREMAP_WB);
        if (!tbl) {
                pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
                       efi_mem_attr_table);
                return -ENOMEM;
        }

        if (efi_enabled(EFI_DBG))
                pr_info("Processing EFI Memory Attributes table:\n");

        for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
                efi_memory_desc_t md;
                unsigned long size;
                bool valid;
                char buf[64];

                valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
                                       &md);
                size = md.num_pages << EFI_PAGE_SHIFT;
                if (efi_enabled(EFI_DBG) || !valid)
                        pr_info("%s 0x%012llx-0x%012llx %s\n",
                                valid ? "" : "!", md.phys_addr,
                                md.phys_addr + size - 1,
                                efi_md_typeattr_format(buf, sizeof(buf), &md));

                if (valid) {
                        ret = fn(mm, &md);
                        if (ret)
                                pr_err("Error updating mappings, skipping subsequent md's\n");
                }
        }

        memunmap(tbl);
        return ret;
}