Merge branch 'devicetree/next-reserved-mem' into devicetree/next

This commit is contained in:
Grant Likely 2014-03-19 15:01:53 +00:00
commit ca3992bc0c
14 changed files with 577 additions and 0 deletions

View File

@@ -0,0 +1,133 @@
*** Reserved memory regions ***
Reserved memory is specified as a node under the /reserved-memory node.
The operating system shall exclude reserved memory from normal usage.
One can create child nodes describing particular reserved (excluded from
normal use) memory regions. Such memory regions are usually designed for
special use by various device drivers.
Parameters for each memory region can be encoded into the device tree
with the following nodes:
/reserved-memory node
---------------------
#address-cells, #size-cells (required) - standard definition
- Should use the same values as the root node
ranges (required) - standard definition
- Should be empty
/reserved-memory/ child nodes
-----------------------------
Each child of the reserved-memory node specifies one or more regions of
reserved memory. Each child node may either use a 'reg' property to
specify a specific range of reserved memory, or a 'size' property with
optional constraints to request a dynamically allocated block of memory.
Following the generic-names recommended practice, node names should
reflect the purpose of the node (e.g. "framebuffer" or "dma-pool"). A unit
address (@<address>) should be appended to the name if the node is a
static allocation.
Properties:
Requires either a) or b) below.
a) static allocation
reg (required) - standard definition
b) dynamic allocation
size (required) - length based on parent's #size-cells
- Size in bytes of memory to reserve.
alignment (optional) - length based on parent's #size-cells
- Address boundary for alignment of allocation.
alloc-ranges (optional) - prop-encoded-array (address, length pairs).
- Specifies regions of memory that are
acceptable to allocate from.
If both reg and size are present, then the reg property takes precedence
and size is ignored.
Additional properties:
compatible (optional) - standard definition
- may contain the following strings:
- shared-dma-pool: This indicates a region of memory meant to be
used as a shared pool of DMA buffers for a set of devices. It can
be used by an operating system to instantiate the necessary pool
management subsystem if necessary.
- vendor specific string in the form <vendor>,[<device>-]<usage>
no-map (optional) - empty property
- Indicates the operating system must not create a virtual mapping
of the region as part of its standard mapping of system memory,
nor permit speculative access to it under any circumstances other
than under the control of the device driver using the region.
reusable (optional) - empty property
- The operating system can use the memory in this region with the
limitation that the device driver(s) owning the region must be
able to reclaim it. Typically that means that the operating
system can use that region to store volatile or cached data that
can be otherwise regenerated or migrated elsewhere.
Linux implementation note:
- If a "linux,cma-default" property is present, then Linux will use the
region for the default pool of the contiguous memory allocator.
Device node references to reserved memory
-----------------------------------------
Regions in the /reserved-memory node may be referenced by other device
nodes by adding a memory-region property to the device node.
memory-region (optional) - phandle, specifier pairs to children of /reserved-memory
Example
-------
This example defines three contiguous regions for the Linux kernel: one
default pool for all device drivers (named linux,cma, dynamically allocated
and 64MiB in size), one dedicated to the framebuffer device (named
framebuffer@78000000, 8MiB), and one for multimedia processing (named
multimedia@77000000, 64MiB).
/ {
#address-cells = <1>;
#size-cells = <1>;
memory {
reg = <0x40000000 0x40000000>;
};
reserved-memory {
#address-cells = <1>;
#size-cells = <1>;
ranges;
/* global autoconfigured region for contiguous allocations */
linux,cma {
compatible = "shared-dma-pool";
reusable;
size = <0x4000000>;
alignment = <0x2000>;
linux,cma-default;
};
display_reserved: framebuffer@78000000 {
reg = <0x78000000 0x800000>;
};
multimedia_reserved: multimedia@77000000 {
compatible = "acme,multimedia-memory";
reg = <0x77000000 0x4000000>;
};
};
/* ... */
fb0: video@12300000 {
memory-region = <&display_reserved>;
/* ... */
};
scaler: scaler@12500000 {
memory-region = <&multimedia_reserved>;
/* ... */
};
codec: codec@12600000 {
memory-region = <&multimedia_reserved>;
/* ... */
};
};
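As a further illustration of the dynamic-allocation constraints described
above, the hypothetical child node below (the label, node name and addresses
are made up for illustration and are not part of the example) shows how
alloc-ranges could restrict where such a pool is placed within
/reserved-memory:

	/* hypothetical: dynamic pool that must land in 0x40000000-0x6fffffff */
	example_pool: dma-pool {
		compatible = "shared-dma-pool";
		size = <0x4000000>;			/* 64 MiB */
		alignment = <0x100000>;			/* 1 MiB boundary */
		alloc-ranges = <0x40000000 0x30000000>;	/* base, length */
	};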

View File

@@ -1918,6 +1918,7 @@ config USE_OF
select IRQ_DOMAIN
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
help
Include support for flattened device tree machine descriptions.

View File

@@ -323,6 +323,8 @@ void __init arm_memblock_init(struct meminfo *mi,
if (mdesc->reserve)
mdesc->reserve();
early_init_fdt_scan_reserved_mem();
/*
* reserve memory for DMA contiguous allocations,
* must come from DMA area inside low memory

View File

@@ -43,6 +43,7 @@ config ARM64
select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select PERF_USE_VMALLOC
select POWER_RESET
select POWER_SUPPLY

View File

@@ -160,6 +160,7 @@ void __init arm64_memblock_init(void)
memblock_reserve(base, size);
}
early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(0);
memblock_allow_resize();

View File

@@ -90,6 +90,7 @@ config PPC
select BINFMT_ELF
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER

View File

@@ -33,6 +33,7 @@
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -588,6 +589,8 @@ static void __init early_reserve_mem_dt(void)
memblock_reserve(base, size);
}
}
early_init_fdt_scan_reserved_mem();
}
static void __init early_reserve_mem(void)

View File

@@ -71,4 +71,10 @@ config OF_MTD
depends on MTD
def_bool y
config OF_RESERVED_MEM
depends on OF_EARLY_FLATTREE
bool
help
Helpers to allow for reservation of memory regions
endmenu # OF

View File

@@ -9,3 +9,4 @@ obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
obj-$(CONFIG_OF_MTD) += of_mtd.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o

View File

@@ -15,6 +15,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -439,6 +441,129 @@ struct boot_param_header *initial_boot_params;
#ifdef CONFIG_OF_EARLY_FLATTREE
/**
* res_mem_reserve_reg() - reserve all memory described in 'reg' property
*/
static int __init __reserved_mem_reserve_reg(unsigned long node,
const char *uname)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t base, size;
unsigned long len;
__be32 *prop;
int nomap, first = 1;
prop = of_get_flat_dt_prop(node, "reg", &len);
if (!prop)
return -ENOENT;
if (len && len % t_len != 0) {
pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
uname);
return -EINVAL;
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
while (len >= t_len) {
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (base && size &&
early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
else
pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
len -= t_len;
if (first) {
fdt_reserved_mem_save_node(node, uname, base, size);
first = 0;
}
}
return 0;
}
/**
* __reserved_mem_check_root() - check if #size-cells, #address-cells provided
* in /reserved-memory matches the values supported by the current implementation,
* also check if ranges property has been provided
*/
static int __reserved_mem_check_root(unsigned long node)
{
__be32 *prop;
prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
return -EINVAL;
prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
return -EINVAL;
prop = of_get_flat_dt_prop(node, "ranges", NULL);
if (!prop)
return -EINVAL;
return 0;
}
/**
* fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
*/
static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
int depth, void *data)
{
static int found;
const char *status;
int err;
if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
if (__reserved_mem_check_root(node) != 0) {
pr_err("Reserved memory: unsupported node format, ignoring\n");
/* break scan */
return 1;
}
found = 1;
/* scan next node */
return 0;
} else if (!found) {
/* scan next node */
return 0;
} else if (found && depth < 2) {
/* scanning of /reserved-memory has been finished */
return 1;
}
status = of_get_flat_dt_prop(node, "status", NULL);
if (status && strcmp(status, "okay") != 0 && strcmp(status, "ok") != 0)
return 0;
err = __reserved_mem_reserve_reg(node, uname);
if (err == -ENOENT && of_get_flat_dt_prop(node, "size", NULL))
fdt_reserved_mem_save_node(node, uname, 0, 0);
/* scan next node */
return 0;
}
/**
* early_init_fdt_scan_reserved_mem() - create reserved memory regions
*
* This function grabs memory from the early allocator for device exclusive use
* defined in device tree structures. It should be called by arch-specific code
* once the early allocator (i.e. memblock) has been fully activated.
*/
void __init early_init_fdt_scan_reserved_mem(void)
{
if (!initial_boot_params)
return;
of_scan_flat_dt(__fdt_scan_reserved_mem, NULL);
fdt_init_reserved_mem();
}
/**
* of_scan_flat_dt - scan flattened tree blob and call callback on each.
* @it: callback function
@@ -856,6 +981,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
if (memblock_is_region_reserved(base, size))
return -EBUSY;
if (nomap)
return memblock_remove(base, size);
return memblock_reserve(base, size);
}
/*
* called from unflatten_device_tree() to bootstrap devicetree itself
* Architectures can override this definition if memblock isn't used
@@ -864,6 +999,14 @@ void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return __va(memblock_alloc(size, align));
}
#else
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
pr_err("Reserved memory not supported, ignoring range 0x%llx - 0x%llx%s\n",
base, size, nomap ? " (nomap)" : "");
return -ENOSYS;
}
#endif
bool __init early_init_dt_scan(void *params)

View File

@@ -0,0 +1,217 @@
/*
* Device tree based initialization code for reserved memory.
*
* Copyright (c) 2013, The Linux Foundation. All Rights Reserved.
* Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
* Author: Josh Cartwright <joshc@codeaurora.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
License or (at your option) any later version of the license.
*/
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#define MAX_RESERVED_REGIONS 16
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
#if defined(CONFIG_HAVE_MEMBLOCK)
#include <linux/memblock.h>
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
/*
* We use __memblock_alloc_base() because memblock_alloc_base()
* panic()s on allocation failure.
*/
phys_addr_t base = __memblock_alloc_base(size, align, end);
if (!base)
return -ENOMEM;
/*
* Check if the allocated region fits into the start..end window
*/
if (base < start) {
memblock_free(base, size);
return -ENOMEM;
}
*res_base = base;
if (nomap)
return memblock_remove(base, size);
return 0;
}
#else
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
size, nomap ? " (nomap)" : "");
return -ENOSYS;
}
#endif
/**
* res_mem_save_node() - save fdt node for second pass initialization
*/
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size)
{
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
pr_err("Reserved memory: not enough space all defined regions.\n");
return;
}
rmem->fdt_node = node;
rmem->name = uname;
rmem->base = base;
rmem->size = size;
reserved_mem_count++;
return;
}
/**
* res_mem_alloc_size() - allocate reserved memory described by 'size', 'align'
* and 'alloc-ranges' properties
*/
static int __init __reserved_mem_alloc_size(unsigned long node,
const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t start = 0, end = 0;
phys_addr_t base = 0, align = 0, size;
unsigned long len;
__be32 *prop;
int nomap;
int ret;
prop = of_get_flat_dt_prop(node, "size", &len);
if (!prop)
return -EINVAL;
if (len != dt_root_size_cells * sizeof(__be32)) {
pr_err("Reserved memory: invalid size property in '%s' node.\n",
uname);
return -EINVAL;
}
size = dt_mem_next_cell(dt_root_size_cells, &prop);
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
if (len != dt_root_addr_cells * sizeof(__be32)) {
pr_err("Reserved memory: invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
align = dt_mem_next_cell(dt_root_addr_cells, &prop);
}
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
if (len % t_len != 0) {
pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n",
uname);
return -EINVAL;
}
base = 0;
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
ret = early_init_dt_alloc_reserved_memory_arch(size,
align, start, end, nomap, &base);
if (ret == 0) {
pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
uname, &base,
(unsigned long)size / SZ_1M);
break;
}
len -= t_len;
}
} else {
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
0, 0, nomap, &base);
if (ret == 0)
pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
}
if (base == 0) {
pr_info("Reserved memory: failed to allocate memory for node '%s'\n",
uname);
return -ENOMEM;
}
*res_base = base;
*res_size = size;
return 0;
}
static const struct of_device_id __rmem_of_table_sentinel
__used __section(__reservedmem_of_table_end);
/**
* res_mem_init_node() - call region specific reserved memory init code
*/
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
extern const struct of_device_id __reservedmem_of_table[];
const struct of_device_id *i;
for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
reservedmem_of_init_fn initfn = i->data;
const char *compat = i->compatible;
if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
continue;
if (initfn(rmem, rmem->fdt_node, rmem->name) == 0) {
pr_info("Reserved memory: initialized node %s, compatible id %s\n",
rmem->name, compat);
return 0;
}
}
return -ENOENT;
}
/**
* fdt_init_reserved_mem - allocate and init all saved reserved memory regions
*/
void __init fdt_init_reserved_mem(void)
{
int i;
for (i = 0; i < reserved_mem_count; i++) {
struct reserved_mem *rmem = &reserved_mem[i];
unsigned long node = rmem->fdt_node;
int err = 0;
if (rmem->size == 0)
err = __reserved_mem_alloc_size(node, rmem->name,
&rmem->base, &rmem->size);
if (err == 0)
__reserved_mem_init_node(rmem);
}
}

View File

@@ -167,6 +167,16 @@
#define CLK_OF_TABLES()
#endif
#ifdef CONFIG_OF_RESERVED_MEM
#define RESERVEDMEM_OF_TABLES() \
. = ALIGN(8); \
VMLINUX_SYMBOL(__reservedmem_of_table) = .; \
*(__reservedmem_of_table) \
*(__reservedmem_of_table_end)
#else
#define RESERVEDMEM_OF_TABLES()
#endif
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
@@ -490,6 +500,7 @@
TRACE_SYSCALLS() \
MEM_DISCARD(init.rodata) \
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
CLKSRC_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE()

View File

@@ -98,7 +98,10 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
extern u64 dt_mem_next_cell(int s, __be32 **cellp);
@@ -118,6 +121,7 @@ extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
#else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}

View File

@@ -0,0 +1,53 @@
#ifndef __OF_RESERVED_MEM_H
#define __OF_RESERVED_MEM_H
struct device;
struct of_phandle_args;
struct reserved_mem_ops;
struct reserved_mem {
const char *name;
unsigned long fdt_node;
const struct reserved_mem_ops *ops;
phys_addr_t base;
phys_addr_t size;
void *priv;
};
struct reserved_mem_ops {
void (*device_init)(struct reserved_mem *rmem,
struct device *dev);
void (*device_release)(struct reserved_mem *rmem,
struct device *dev);
};
typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem,
unsigned long node, const char *uname);
#ifdef CONFIG_OF_RESERVED_MEM
void fdt_init_reserved_mem(void);
void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size);
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
static const struct of_device_id __reservedmem_of_table_##name \
__used __section(__reservedmem_of_table) \
= { .compatible = compat, \
.data = (init == (reservedmem_of_init_fn)NULL) ? \
init : init }
#else
static inline void fdt_init_reserved_mem(void) { }
static inline void fdt_reserved_mem_save_node(unsigned long node,
const char *uname, phys_addr_t base, phys_addr_t size) { }
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
static const struct of_device_id __reservedmem_of_table_##name \
__attribute__((unused)) \
= { .compatible = compat, \
.data = (init == (reservedmem_of_init_fn)NULL) ? \
init : init }
#endif
#endif /* __OF_RESERVED_MEM_H */
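Entries declared with RESERVEDMEM_OF_DECLARE() are collected into the
__reservedmem_of_table section and walked by __reserved_mem_init_node()
added earlier in this commit. A minimal sketch of how a driver could register
a region-specific initializer is shown below; the acme_* names are
hypothetical (the compatible string is reused from the binding example) and
this code is not part of the commit:

	#include <linux/init.h>
	#include <linux/printk.h>
	#include <linux/of_reserved_mem.h>

	/*
	 * Called during fdt_init_reserved_mem() for every /reserved-memory
	 * child node whose compatible matches "acme,multimedia-memory".
	 */
	static int __init acme_multimedia_setup(struct reserved_mem *rmem,
						unsigned long node, const char *uname)
	{
		/* rmem->base and rmem->size are already filled in here */
		pr_info("acme: multimedia region at %pa, size %pa\n",
			&rmem->base, &rmem->size);
		return 0;	/* 0 marks the region as handled */
	}
	RESERVEDMEM_OF_DECLARE(acme_mm, "acme,multimedia-memory",
			       acme_multimedia_setup);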