// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#define MAX_RESERVED_REGIONS	32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_find_in_range(start, end, size, align);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap)
		return memblock_remove(base, size);

	return memblock_reserve(base, size);
}

/**
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				       phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
}
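
/*
 * For reference, a dynamic reservation exercising the properties parsed by
 * __reserved_mem_alloc_size() below might look like the following device
 * tree fragment (an illustrative sketch only; the label, node name and
 * values are made up, and cell counts follow the parent's
 * #address-cells/#size-cells):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		example_pool: example-pool {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			alignment = <0x2000>;
 *			alloc-ranges = <0x40000000 0x10000000>;
 *		};
 *	};
 */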

/**
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *				 'size', 'alignment' and 'alloc-ranges' properties
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	int nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
				uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !of_get_flat_dt_prop(node, "no-map", NULL)) {
		unsigned long order =
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

		align = max(align, (phys_addr_t)PAGE_SIZE << order);
	}

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = early_init_dt_alloc_reserved_memory_arch(size,
					align, start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_info("failed to allocate memory for node '%s'\n", uname);
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}

static const struct of_device_id __rmem_of_table_sentinel
	__used __section(__reservedmem_of_table_end);

/**
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
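
/*
 * Entries in __reservedmem_of_table are registered elsewhere with the
 * RESERVEDMEM_OF_DECLARE() macro from <linux/of_reserved_mem.h>. A region
 * with its own setup hook would be declared roughly like this (an
 * illustrative sketch; the "foo" names are hypothetical):
 *
 *	static int __init foo_rmem_setup(struct reserved_mem *rmem)
 *	{
 *		rmem->ops = &foo_rmem_ops;
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(foo, "vendor,foo-pool", foo_rmem_setup);
 */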

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;
		int nomap;

		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len/4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						&rmem->base, &rmem->size);
		if (err == 0) {
			err = __reserved_mem_init_node(rmem);
			if (err != 0 && err != -ENOENT) {
				pr_info("node %s compatible matching failed\n",
					rmem->name);
				memblock_free(rmem->base, rmem->size);
				if (nomap)
					memblock_add(rmem->base, rmem->size);
			}
		}
	}
}

static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'reserved-memory' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on the
 * reserved memory region specified by the 'memory-region' property in the
 * @np node to the @dev device. When a driver needs to use more than one
 * reserved memory region, it should allocate child devices and initialize
 * regions by name for each child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = __find_rmem(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
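
/*
 * Typical use from a platform driver's probe path (an illustrative sketch
 * only; the "foo" driver below is hypothetical and not part of this file):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *							 pdev->dev.of_node, 0);
 *		if (ret && ret != -ENODEV)
 *			return ret;
 *
 *		return 0;
 *	}
 *
 * The matching of_reserved_mem_device_release(&pdev->dev) is then called
 * from the driver's remove path.
 */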

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd;
	struct reserved_mem *rmem = NULL;

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev) {
			rmem = rd->rmem;
			list_del(&rd->list);
			kfree(rd);
			break;
		}
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	if (!rmem || !rmem->ops || !rmem->ops->device_release)
		return;

	rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
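
/*
 * Example lookup from driver code (an illustrative sketch only; the variable
 * names are hypothetical): resolve the consumer's "memory-region" phandle,
 * then map it to the reserved_mem entry saved during early boot.
 *
 *	struct device_node *target;
 *	struct reserved_mem *rmem = NULL;
 *
 *	target = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	if (target) {
 *		rmem = of_reserved_mem_lookup(target);
 *		of_node_put(target);
 *	}
 *	if (!rmem)
 *		return -ENODEV;
 *
 * On success, rmem->base and rmem->size describe the reserved region.
 */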