2017-12-28 02:55:14 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0+
|
2014-02-28 21:42:48 +08:00
|
|
|
/*
|
|
|
|
* Device tree based initialization code for reserved memory.
|
|
|
|
*
|
2015-09-16 09:30:36 +08:00
|
|
|
* Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
|
2014-02-28 21:42:48 +08:00
|
|
|
* Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
|
|
|
|
* http://www.samsung.com
|
|
|
|
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
|
|
|
|
* Author: Josh Cartwright <joshc@codeaurora.org>
|
|
|
|
*/
|
|
|
|
|
2016-06-15 21:32:18 +08:00
|
|
|
#define pr_fmt(fmt) "OF: reserved mem: " fmt
|
|
|
|
|
2014-02-28 21:42:48 +08:00
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/of.h>
|
|
|
|
#include <linux/of_fdt.h>
|
|
|
|
#include <linux/of_platform.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/sizes.h>
|
|
|
|
#include <linux/of_reserved_mem.h>
|
2015-09-16 09:30:36 +08:00
|
|
|
#include <linux/sort.h>
|
2016-05-24 21:31:24 +08:00
|
|
|
#include <linux/slab.h>
|
2018-10-31 06:07:44 +08:00
|
|
|
#include <linux/memblock.h>
|
2021-10-21 15:09:29 +08:00
|
|
|
#include <linux/kmemleak.h>
|
2022-03-23 05:43:17 +08:00
|
|
|
#include <linux/cma.h>
|
2014-02-28 21:42:48 +08:00
|
|
|
|
2021-05-28 03:38:41 +08:00
|
|
|
#include "of_private.h"
|
|
|
|
|
2020-02-25 02:02:32 +08:00
|
|
|
/* Upper bound on reserved-memory nodes the early (pre-slab) table can hold. */
#define MAX_RESERVED_REGIONS	64
/* Regions saved during FDT scan, initialized later by fdt_init_reserved_mem(). */
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
/* Number of valid entries in reserved_mem[]. */
static int reserved_mem_count;
|
|
|
|
|
2019-02-11 21:35:45 +08:00
|
|
|
/*
 * early_init_dt_alloc_reserved_memory_arch() - carve one region out of memblock
 * @size:     number of bytes to allocate
 * @align:    required alignment; 0 means SMP_CACHE_BYTES
 * @start:    lowest acceptable physical address; 0 means no lower bound
 * @end:      highest acceptable physical address; 0 means no upper bound
 * @nomap:    region must be excluded from the kernel linear mapping
 * @res_base: on success, filled with the base address of the allocation
 *
 * Returns 0 on success, -ENOMEM when no suitable range exists, or the
 * error from memblock_mark_nomap() (in which case the allocation is
 * handed back to memblock).
 */
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t alloc;
	int ret = 0;

	/* Map "unconstrained" onto memblock's conventions. */
	if (!end)
		end = MEMBLOCK_ALLOC_ANYWHERE;
	if (!align)
		align = SMP_CACHE_BYTES;

	alloc = memblock_phys_alloc_range(size, align, start, end);
	if (!alloc)
		return -ENOMEM;

	*res_base = alloc;

	if (nomap) {
		ret = memblock_mark_nomap(alloc, size);
		if (ret)
			memblock_phys_free(alloc, size);
	}

	/*
	 * Reserved regions carry DMA buffers rather than kernel objects,
	 * and nomap/CMA ranges may not even be mapped; keep kmemleak away
	 * from the whole range.
	 */
	kmemleak_ignore_phys(alloc);

	return ret;
}
|
|
|
|
|
2021-03-18 18:40:36 +08:00
|
|
|
/*
|
2020-05-11 23:04:57 +08:00
|
|
|
* fdt_reserved_mem_save_node() - save fdt node for second pass initialization
|
2014-02-28 21:42:48 +08:00
|
|
|
*/
|
|
|
|
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
|
|
|
|
phys_addr_t base, phys_addr_t size)
|
|
|
|
{
|
|
|
|
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
|
|
|
|
|
|
|
|
if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
|
2020-06-04 13:49:00 +08:00
|
|
|
pr_err("not enough space for all defined regions.\n");
|
2014-02-28 21:42:48 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
rmem->fdt_node = node;
|
|
|
|
rmem->name = uname;
|
|
|
|
rmem->base = base;
|
|
|
|
rmem->size = size;
|
|
|
|
|
|
|
|
reserved_mem_count++;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-06-15 03:20:42 +08:00
|
|
|
/*
|
|
|
|
* __reserved_mem_alloc_in_range() - allocate reserved memory described with
|
|
|
|
* 'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
|
|
|
|
* reserved regions to keep the reserved memory contiguous if possible.
|
|
|
|
*/
|
|
|
|
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
|
|
|
|
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
|
|
|
|
phys_addr_t *res_base)
|
|
|
|
{
|
|
|
|
bool prev_bottom_up = memblock_bottom_up();
|
|
|
|
bool bottom_up = false, top_down = false;
|
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
for (i = 0; i < reserved_mem_count; i++) {
|
|
|
|
struct reserved_mem *rmem = &reserved_mem[i];
|
|
|
|
|
|
|
|
/* Skip regions that were not reserved yet */
|
|
|
|
if (rmem->size == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If range starts next to an existing reservation, use bottom-up:
|
|
|
|
* |....RRRR................RRRRRRRR..............|
|
|
|
|
* --RRRR------
|
|
|
|
*/
|
|
|
|
if (start >= rmem->base && start <= (rmem->base + rmem->size))
|
|
|
|
bottom_up = true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If range ends next to an existing reservation, use top-down:
|
|
|
|
* |....RRRR................RRRRRRRR..............|
|
|
|
|
* -------RRRR-----
|
|
|
|
*/
|
|
|
|
if (end >= rmem->base && end <= (rmem->base + rmem->size))
|
|
|
|
top_down = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Change setting only if either bottom-up or top-down was selected */
|
|
|
|
if (bottom_up != top_down)
|
|
|
|
memblock_set_bottom_up(bottom_up);
|
|
|
|
|
|
|
|
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
|
|
|
|
start, end, nomap, res_base);
|
|
|
|
|
|
|
|
/* Restore old setting if needed */
|
|
|
|
if (bottom_up != top_down)
|
|
|
|
memblock_set_bottom_up(prev_bottom_up);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-03-18 18:40:36 +08:00
|
|
|
/*
|
2020-05-11 23:04:57 +08:00
|
|
|
* __reserved_mem_alloc_size() - allocate reserved memory described by
|
2020-07-30 17:23:53 +08:00
|
|
|
* 'size', 'alignment' and 'alloc-ranges' properties.
|
2014-02-28 21:42:48 +08:00
|
|
|
*/
|
|
|
|
static int __init __reserved_mem_alloc_size(unsigned long node,
|
|
|
|
const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
|
|
|
|
{
|
|
|
|
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
|
|
|
|
phys_addr_t start = 0, end = 0;
|
|
|
|
phys_addr_t base = 0, align = 0, size;
|
2014-04-02 12:49:03 +08:00
|
|
|
int len;
|
|
|
|
const __be32 *prop;
|
2020-07-30 17:23:53 +08:00
|
|
|
bool nomap;
|
2014-02-28 21:42:48 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
prop = of_get_flat_dt_prop(node, "size", &len);
|
|
|
|
if (!prop)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (len != dt_root_size_cells * sizeof(__be32)) {
|
2016-06-15 21:32:18 +08:00
|
|
|
pr_err("invalid size property in '%s' node.\n", uname);
|
2014-02-28 21:42:48 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
size = dt_mem_next_cell(dt_root_size_cells, &prop);
|
|
|
|
|
|
|
|
prop = of_get_flat_dt_prop(node, "alignment", &len);
|
|
|
|
if (prop) {
|
|
|
|
if (len != dt_root_addr_cells * sizeof(__be32)) {
|
2016-06-15 21:32:18 +08:00
|
|
|
pr_err("invalid alignment property in '%s' node.\n",
|
2014-02-28 21:42:48 +08:00
|
|
|
uname);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
align = dt_mem_next_cell(dt_root_addr_cells, &prop);
|
|
|
|
}
|
|
|
|
|
2020-07-30 17:23:53 +08:00
|
|
|
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
|
|
|
|
|
2015-11-10 20:30:26 +08:00
|
|
|
/* Need adjust the alignment to satisfy the CMA requirement */
|
2016-05-25 12:29:50 +08:00
|
|
|
if (IS_ENABLED(CONFIG_CMA)
|
|
|
|
&& of_flat_dt_is_compatible(node, "shared-dma-pool")
|
|
|
|
&& of_get_flat_dt_prop(node, "reusable", NULL)
|
2022-03-23 05:43:17 +08:00
|
|
|
&& !nomap)
|
|
|
|
align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
|
2015-11-10 20:30:26 +08:00
|
|
|
|
2014-02-28 21:42:48 +08:00
|
|
|
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
|
|
|
|
if (prop) {
|
|
|
|
|
|
|
|
if (len % t_len != 0) {
|
2016-06-15 21:32:18 +08:00
|
|
|
pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
|
2014-02-28 21:42:48 +08:00
|
|
|
uname);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
base = 0;
|
|
|
|
|
|
|
|
while (len > 0) {
|
|
|
|
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
|
|
|
|
end = start + dt_mem_next_cell(dt_root_size_cells,
|
|
|
|
&prop);
|
|
|
|
|
2023-06-15 03:20:42 +08:00
|
|
|
ret = __reserved_mem_alloc_in_range(size, align,
|
|
|
|
start, end, nomap, &base);
|
2014-02-28 21:42:48 +08:00
|
|
|
if (ret == 0) {
|
2021-06-16 17:27:44 +08:00
|
|
|
pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
|
2014-02-28 21:42:48 +08:00
|
|
|
uname, &base,
|
2021-06-16 17:27:44 +08:00
|
|
|
(unsigned long)(size / SZ_1M));
|
2014-02-28 21:42:48 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
len -= t_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
} else {
|
|
|
|
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
|
|
|
|
0, 0, nomap, &base);
|
|
|
|
if (ret == 0)
|
2021-06-16 17:27:44 +08:00
|
|
|
pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
|
|
|
|
uname, &base, (unsigned long)(size / SZ_1M));
|
2014-02-28 21:42:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (base == 0) {
|
2022-06-28 19:35:40 +08:00
|
|
|
pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
|
|
|
|
uname, (unsigned long)(size / SZ_1M));
|
2014-02-28 21:42:48 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
*res_base = base;
|
|
|
|
*res_size = size;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-02-28 21:42:49 +08:00
|
|
|
static const struct of_device_id __rmem_of_table_sentinel
|
2020-10-22 10:36:07 +08:00
|
|
|
__used __section("__reservedmem_of_table_end");
|
2014-02-28 21:42:49 +08:00
|
|
|
|
2021-03-18 18:40:36 +08:00
|
|
|
/*
|
2020-05-11 23:04:57 +08:00
|
|
|
* __reserved_mem_init_node() - call region specific reserved memory init code
|
2014-02-28 21:42:49 +08:00
|
|
|
*/
|
|
|
|
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
|
|
|
|
{
|
|
|
|
extern const struct of_device_id __reservedmem_of_table[];
|
|
|
|
const struct of_device_id *i;
|
2019-02-19 15:45:00 +08:00
|
|
|
int ret = -ENOENT;
|
2014-02-28 21:42:49 +08:00
|
|
|
|
|
|
|
for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
|
2017-05-11 23:15:10 +08:00
|
|
|
reservedmem_of_init_fn initfn = i->data;
|
2014-02-28 21:42:49 +08:00
|
|
|
const char *compat = i->compatible;
|
|
|
|
|
|
|
|
if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
|
|
|
|
continue;
|
|
|
|
|
2019-02-19 15:45:00 +08:00
|
|
|
ret = initfn(rmem);
|
|
|
|
if (ret == 0) {
|
2016-06-15 21:32:18 +08:00
|
|
|
pr_info("initialized node %s, compatible id %s\n",
|
2014-02-28 21:42:49 +08:00
|
|
|
rmem->name, compat);
|
2019-02-19 15:45:00 +08:00
|
|
|
break;
|
2014-02-28 21:42:49 +08:00
|
|
|
}
|
|
|
|
}
|
2019-02-19 15:45:00 +08:00
|
|
|
return ret;
|
2014-02-28 21:42:49 +08:00
|
|
|
}
|
|
|
|
|
2015-09-16 09:30:36 +08:00
|
|
|
/* sort() comparator: order regions by base, then size, then FDT offset. */
static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *left = a, *right = b;

	if (left->base != right->base)
		return left->base < right->base ? -1 : 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (left->size != right->size)
		return left->size < right->size ? -1 : 1;

	/* Tie-break on the FDT node so the sort is fully deterministic. */
	if (left->fdt_node != right->fdt_node)
		return left->fdt_node < right->fdt_node ? -1 : 1;

	return 0;
}
|
|
|
|
|
|
|
|
/* Sort the saved regions and warn about any pair that overlaps. */
static void __init __rmem_check_for_overlap(void)
{
	int idx;

	/* Nothing can overlap with fewer than two regions. */
	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (idx = 0; idx < reserved_mem_count - 1; idx++) {
		struct reserved_mem *cur = &reserved_mem[idx];
		struct reserved_mem *nxt = &reserved_mem[idx + 1];

		/* After sorting, only neighbours can possibly overlap. */
		if (cur->base + cur->size > nxt->base) {
			phys_addr_t cur_end, nxt_end;

			cur_end = cur->base + cur->size;
			nxt_end = nxt->base + nxt->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       cur->name, &cur->base, &cur_end,
			       nxt->name, &nxt->base, &nxt_end);
		}
	}
}
|
|
|
|
|
2014-02-28 21:42:48 +08:00
|
|
|
/**
|
2020-05-11 23:04:57 +08:00
|
|
|
* fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
|
2014-02-28 21:42:48 +08:00
|
|
|
*/
|
|
|
|
void __init fdt_init_reserved_mem(void)
|
|
|
|
{
|
|
|
|
int i;
|
2015-09-16 09:30:36 +08:00
|
|
|
|
|
|
|
/* check for overlapping reserved regions */
|
|
|
|
__rmem_check_for_overlap();
|
|
|
|
|
2014-02-28 21:42:48 +08:00
|
|
|
for (i = 0; i < reserved_mem_count; i++) {
|
|
|
|
struct reserved_mem *rmem = &reserved_mem[i];
|
|
|
|
unsigned long node = rmem->fdt_node;
|
2014-07-14 16:28:04 +08:00
|
|
|
int len;
|
|
|
|
const __be32 *prop;
|
2014-02-28 21:42:48 +08:00
|
|
|
int err = 0;
|
2020-07-30 17:23:53 +08:00
|
|
|
bool nomap;
|
2014-02-28 21:42:48 +08:00
|
|
|
|
2019-02-19 15:45:00 +08:00
|
|
|
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
|
2014-07-14 16:28:04 +08:00
|
|
|
prop = of_get_flat_dt_prop(node, "phandle", &len);
|
|
|
|
if (!prop)
|
|
|
|
prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
|
|
|
|
if (prop)
|
|
|
|
rmem->phandle = of_read_number(prop, len/4);
|
|
|
|
|
2014-02-28 21:42:48 +08:00
|
|
|
if (rmem->size == 0)
|
|
|
|
err = __reserved_mem_alloc_size(node, rmem->name,
|
|
|
|
&rmem->base, &rmem->size);
|
2019-02-19 15:45:00 +08:00
|
|
|
if (err == 0) {
|
|
|
|
err = __reserved_mem_init_node(rmem);
|
|
|
|
if (err != 0 && err != -ENOENT) {
|
|
|
|
pr_info("node %s compatible matching fail\n",
|
|
|
|
rmem->name);
|
|
|
|
if (nomap)
|
2021-06-11 21:11:53 +08:00
|
|
|
memblock_clear_nomap(rmem->base, rmem->size);
|
2021-06-11 21:11:52 +08:00
|
|
|
else
|
2021-11-06 04:43:19 +08:00
|
|
|
memblock_phys_free(rmem->base,
|
|
|
|
rmem->size);
|
2023-02-10 00:09:55 +08:00
|
|
|
} else {
|
|
|
|
phys_addr_t end = rmem->base + rmem->size - 1;
|
|
|
|
bool reusable =
|
|
|
|
(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
|
|
|
|
|
2023-02-16 16:37:25 +08:00
|
|
|
pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
|
2023-02-10 00:09:55 +08:00
|
|
|
&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
|
|
|
|
nomap ? "nomap" : "map",
|
|
|
|
reusable ? "reusable" : "non-reusable",
|
|
|
|
rmem->name ? rmem->name : "unknown");
|
2019-02-19 15:45:00 +08:00
|
|
|
}
|
|
|
|
}
|
2014-02-28 21:42:48 +08:00
|
|
|
}
|
|
|
|
}
|
2014-07-14 16:28:04 +08:00
|
|
|
|
|
|
|
static inline struct reserved_mem *__find_rmem(struct device_node *node)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
if (!node->phandle)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
for (i = 0; i < reserved_mem_count; i++)
|
|
|
|
if (reserved_mem[i].phandle == node->phandle)
|
|
|
|
return &reserved_mem[i];
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-05-24 21:31:24 +08:00
|
|
|
/* Bookkeeping for one reserved-memory region assigned to a device. */
struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

/* All live assignments; protected by of_rmem_assigned_device_mutex. */
static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);
|
|
|
|
|
2014-07-14 16:28:04 +08:00
|
|
|
/**
|
2016-05-24 21:31:24 +08:00
|
|
|
* of_reserved_mem_device_init_by_idx() - assign reserved memory region to
|
|
|
|
* given device
|
|
|
|
* @dev: Pointer to the device to configure
|
|
|
|
* @np: Pointer to the device_node with 'reserved-memory' property
|
|
|
|
* @idx: Index of selected region
|
2014-07-14 16:28:04 +08:00
|
|
|
*
|
2016-05-24 21:31:24 +08:00
|
|
|
* This function assigns respective DMA-mapping operations based on reserved
|
|
|
|
* memory region specified by 'memory-region' property in @np node to the @dev
|
|
|
|
* device. When driver needs to use more than one reserved memory region, it
|
|
|
|
* should allocate child devices and initialize regions by name for each of
|
|
|
|
* child device.
|
|
|
|
*
|
|
|
|
* Returns error code or zero on success.
|
2014-07-14 16:28:04 +08:00
|
|
|
*/
|
2016-05-24 21:31:24 +08:00
|
|
|
int of_reserved_mem_device_init_by_idx(struct device *dev,
|
|
|
|
struct device_node *np, int idx)
|
2014-07-14 16:28:04 +08:00
|
|
|
{
|
2016-05-24 21:31:24 +08:00
|
|
|
struct rmem_assigned_device *rd;
|
|
|
|
struct device_node *target;
|
2014-07-14 16:28:04 +08:00
|
|
|
struct reserved_mem *rmem;
|
2014-10-30 05:50:29 +08:00
|
|
|
int ret;
|
2014-07-14 16:28:04 +08:00
|
|
|
|
2016-05-24 21:31:24 +08:00
|
|
|
if (!np || !dev)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
target = of_parse_phandle(np, "memory-region", idx);
|
|
|
|
if (!target)
|
2016-06-08 14:51:53 +08:00
|
|
|
return -ENODEV;
|
2014-07-14 16:28:04 +08:00
|
|
|
|
2019-10-20 09:57:24 +08:00
|
|
|
if (!of_device_is_available(target)) {
|
|
|
|
of_node_put(target);
|
2019-05-22 18:47:11 +08:00
|
|
|
return 0;
|
2019-10-20 09:57:24 +08:00
|
|
|
}
|
2019-05-22 18:47:11 +08:00
|
|
|
|
2016-05-24 21:31:24 +08:00
|
|
|
rmem = __find_rmem(target);
|
|
|
|
of_node_put(target);
|
2014-07-14 16:28:04 +08:00
|
|
|
|
|
|
|
if (!rmem || !rmem->ops || !rmem->ops->device_init)
|
2014-10-30 05:50:29 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2016-05-24 21:31:24 +08:00
|
|
|
rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
|
|
|
|
if (!rd)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2014-10-30 05:50:29 +08:00
|
|
|
ret = rmem->ops->device_init(rmem, dev);
|
2016-05-24 21:31:24 +08:00
|
|
|
if (ret == 0) {
|
|
|
|
rd->dev = dev;
|
|
|
|
rd->rmem = rmem;
|
|
|
|
|
|
|
|
mutex_lock(&of_rmem_assigned_device_mutex);
|
|
|
|
list_add(&rd->list, &of_rmem_assigned_device_list);
|
|
|
|
mutex_unlock(&of_rmem_assigned_device_mutex);
|
|
|
|
|
2014-10-30 05:50:29 +08:00
|
|
|
dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
|
2016-05-24 21:31:24 +08:00
|
|
|
} else {
|
|
|
|
kfree(rd);
|
|
|
|
}
|
2014-07-14 16:28:04 +08:00
|
|
|
|
2014-10-30 05:50:29 +08:00
|
|
|
return ret;
|
2014-07-14 16:28:04 +08:00
|
|
|
}
|
2016-05-24 21:31:24 +08:00
|
|
|
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
|
2014-07-14 16:28:04 +08:00
|
|
|
|
2020-04-04 01:44:52 +08:00
|
|
|
/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev: pointer to the device to configure
 * @np: pointer to the device node with 'memory-region' property
 * @name: name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	/* Translate the name to an index, then reuse the by-index path. */
	return of_reserved_mem_device_init_by_idx(dev, np,
			of_property_match_string(np, "memory-region-names",
						 name));
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
|
|
|
|
|
2014-07-14 16:28:04 +08:00
|
|
|
/**
|
|
|
|
* of_reserved_mem_device_release() - release reserved memory device structures
|
2016-05-24 21:31:24 +08:00
|
|
|
* @dev: Pointer to the device to deconfigure
|
2014-07-14 16:28:04 +08:00
|
|
|
*
|
|
|
|
* This function releases structures allocated for memory region handling for
|
|
|
|
* the given device.
|
|
|
|
*/
|
|
|
|
void of_reserved_mem_device_release(struct device *dev)
|
|
|
|
{
|
2020-04-04 01:46:57 +08:00
|
|
|
struct rmem_assigned_device *rd, *tmp;
|
|
|
|
LIST_HEAD(release_list);
|
2016-05-24 21:31:24 +08:00
|
|
|
|
|
|
|
mutex_lock(&of_rmem_assigned_device_mutex);
|
2020-04-04 01:46:57 +08:00
|
|
|
list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
|
|
|
|
if (rd->dev == dev)
|
|
|
|
list_move_tail(&rd->list, &release_list);
|
2016-05-24 21:31:24 +08:00
|
|
|
}
|
|
|
|
mutex_unlock(&of_rmem_assigned_device_mutex);
|
2014-07-14 16:28:04 +08:00
|
|
|
|
2020-04-04 01:46:57 +08:00
|
|
|
list_for_each_entry_safe(rd, tmp, &release_list, list) {
|
|
|
|
if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
|
|
|
|
rd->rmem->ops->device_release(rd->rmem, dev);
|
2014-07-14 16:28:04 +08:00
|
|
|
|
2020-04-04 01:46:57 +08:00
|
|
|
kfree(rd);
|
|
|
|
}
|
2014-07-14 16:28:04 +08:00
|
|
|
}
|
2015-01-09 22:29:05 +08:00
|
|
|
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
|
2017-10-11 13:08:54 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* of_reserved_mem_lookup() - acquire reserved_mem from a device node
|
|
|
|
* @np: node pointer of the desired reserved-memory region
|
|
|
|
*
|
|
|
|
* This function allows drivers to acquire a reference to the reserved_mem
|
|
|
|
* struct based on a device node handle.
|
|
|
|
*
|
|
|
|
* Returns a reserved_mem reference, or NULL on error.
|
|
|
|
*/
|
|
|
|
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
|
|
|
|
{
|
|
|
|
const char *name;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!np->full_name)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
name = kbasename(np->full_name);
|
|
|
|
for (i = 0; i < reserved_mem_count; i++)
|
|
|
|
if (!strcmp(reserved_mem[i].name, name))
|
|
|
|
return &reserved_mem[i];
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
|