OpenCloudOS-Kernel/drivers/of/fdt.c

// SPDX-License-Identifier: GPL-2.0
/*
* Functions for working with the Flattened Device Tree data format
*
* Copyright 2009 Benjamin Herrenschmidt, IBM Corp
* benh@kernel.crashing.org
*/
#define pr_fmt(fmt) "OF: fdt: " fmt
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/libfdt.h>
#include <linux/debugfs.h>
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
#include "of_private.h"
/*
* of_fdt_limit_memory - limit the number of regions in the /memory node
* @limit: maximum entries
*
* Adjust the flattened device tree to have at most 'limit' number of
* memory entries in the /memory node. This function may be called
* any time after initial_boot_params is set.
*/
void __init of_fdt_limit_memory(int limit)
{
int memory;
int len;
const void *val;
int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
const __be32 *addr_prop;
const __be32 *size_prop;
int root_offset;
int cell_size;
root_offset = fdt_path_offset(initial_boot_params, "/");
if (root_offset < 0)
return;
addr_prop = fdt_getprop(initial_boot_params, root_offset,
"#address-cells", NULL);
if (addr_prop)
nr_address_cells = fdt32_to_cpu(*addr_prop);
size_prop = fdt_getprop(initial_boot_params, root_offset,
"#size-cells", NULL);
if (size_prop)
nr_size_cells = fdt32_to_cpu(*size_prop);
cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
memory = fdt_path_offset(initial_boot_params, "/memory");
if (memory > 0) {
val = fdt_getprop(initial_boot_params, memory, "reg", &len);
if (len > limit*cell_size) {
len = limit*cell_size;
pr_debug("Limiting number of entries to %d\n", limit);
fdt_setprop(initial_boot_params, memory, "reg", val,
len);
}
}
}
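/*
* Worked example (hypothetical cell counts): with #address-cells = <2> and
* #size-cells = <2>, cell_size in of_fdt_limit_memory() above is
* 4 * sizeof(uint32_t) = 16 bytes per /memory 'reg' entry, so
* of_fdt_limit_memory(4) truncates the property to at most 64 bytes,
* i.e. four base/size pairs.
*/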
static bool of_fdt_device_is_available(const void *blob, unsigned long node)
{
const char *status = fdt_getprop(blob, node, "status", NULL);
if (!status)
return true;
if (!strcmp(status, "ok") || !strcmp(status, "okay"))
return true;
return false;
}
static void *unflatten_dt_alloc(void **mem, unsigned long size,
unsigned long align)
{
void *res;
*mem = PTR_ALIGN(*mem, align);
res = *mem;
*mem += size;
return res;
}
static void populate_properties(const void *blob,
int offset,
void **mem,
struct device_node *np,
const char *nodename,
bool dryrun)
{
struct property *pp, **pprev = NULL;
int cur;
bool has_name = false;
pprev = &np->properties;
for (cur = fdt_first_property_offset(blob, offset);
cur >= 0;
cur = fdt_next_property_offset(blob, cur)) {
const __be32 *val;
const char *pname;
u32 sz;
val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
if (!val) {
pr_warn("Cannot locate property at 0x%x\n", cur);
continue;
}
if (!pname) {
pr_warn("Cannot find property name at 0x%x\n", cur);
continue;
}
if (!strcmp(pname, "name"))
has_name = true;
pp = unflatten_dt_alloc(mem, sizeof(struct property),
__alignof__(struct property));
if (dryrun)
continue;
/* We accept flattened tree phandles either in
* ePAPR-style "phandle" properties, or the
* legacy "linux,phandle" properties. If both
* appear and have different values, things
* will get weird. Don't do that.
*/
if (!strcmp(pname, "phandle") ||
!strcmp(pname, "linux,phandle")) {
if (!np->phandle)
np->phandle = be32_to_cpup(val);
}
/* And we process the "ibm,phandle" property
* used in pSeries dynamic device tree
* stuff
*/
if (!strcmp(pname, "ibm,phandle"))
np->phandle = be32_to_cpup(val);
pp->name = (char *)pname;
pp->length = sz;
pp->value = (__be32 *)val;
*pprev = pp;
pprev = &pp->next;
}
/* With version 0x10 we may not have the name property,
* recreate it here from the unit name if absent
*/
if (!has_name) {
const char *p = nodename, *ps = p, *pa = NULL;
int len;
while (*p) {
if ((*p) == '@')
pa = p;
else if ((*p) == '/')
ps = p + 1;
p++;
}
if (pa < ps)
pa = p;
len = (pa - ps) + 1;
pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
__alignof__(struct property));
if (!dryrun) {
pp->name = "name";
pp->length = len;
pp->value = pp + 1;
*pprev = pp;
pprev = &pp->next;
memcpy(pp->value, ps, len - 1);
((char *)pp->value)[len - 1] = 0;
pr_debug("fixed up name for %s -> %s\n",
nodename, (char *)pp->value);
}
}
if (!dryrun)
*pprev = NULL;
}
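/*
* Illustrative example (hypothetical unit name): for a node named
* "serial@10000000", the name-recovery loop at the end of
* populate_properties() leaves @ps at the start of the string and @pa at
* the '@', so the recreated "name" property becomes "serial". For a node
* without a unit address, @pa falls back to the terminating NUL and the
* whole node name is kept.
*/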
static bool populate_node(const void *blob,
int offset,
void **mem,
struct device_node *dad,
struct device_node **pnp,
bool dryrun)
{
struct device_node *np;
const char *pathp;
unsigned int l, allocl;
pathp = fdt_get_name(blob, offset, &l);
if (!pathp) {
*pnp = NULL;
return false;
}
allocl = ++l;
np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
__alignof__(struct device_node));
if (!dryrun) {
char *fn;
of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
memcpy(fn, pathp, l);
if (dad != NULL) {
np->parent = dad;
np->sibling = dad->child;
dad->child = np;
}
}
populate_properties(blob, offset, mem, np, pathp, dryrun);
if (!dryrun) {
np->name = of_get_property(np, "name", NULL);
if (!np->name)
np->name = "<NULL>";
}
*pnp = np;
return true;
}
static void reverse_nodes(struct device_node *parent)
{
struct device_node *child, *next;
/* Recurse into children first (depth-first) */
child = parent->child;
while (child) {
reverse_nodes(child);
child = child->sibling;
}
/* Reverse the nodes in the child list */
child = parent->child;
parent->child = NULL;
while (child) {
next = child->sibling;
child->sibling = parent->child;
parent->child = child;
child = next;
}
}
/**
* unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
* @blob: The parent device tree blob
* @mem: Memory chunk to use for allocating device nodes and properties
* @dad: Parent struct device_node
* @nodepp: The device_node tree created by the call
*
* Returns the size of the unflattened device tree or a negative error code.
*/
static int unflatten_dt_nodes(const void *blob,
void *mem,
struct device_node *dad,
struct device_node **nodepp)
{
struct device_node *root;
int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH 64
struct device_node *nps[FDT_MAX_DEPTH];
void *base = mem;
bool dryrun = !base;
if (nodepp)
*nodepp = NULL;
/*
* We're unflattening a device sub-tree if @dad is valid. There are
* possibly multiple nodes in the first level of depth. We need to
* set @depth to 1 to make fdt_next_node() happy, as it bails out
* immediately when a negative @depth is found. Otherwise, the device
* nodes other than the first one won't be unflattened successfully.
*/
if (dad)
depth = initial_depth = 1;
root = dad;
nps[depth] = dad;
for (offset = 0;
offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
continue;
if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
!of_fdt_device_is_available(blob, offset))
continue;
if (!populate_node(blob, offset, &mem, nps[depth],
&nps[depth+1], dryrun))
return mem - base;
if (!dryrun && nodepp && !*nodepp)
*nodepp = nps[depth+1];
if (!dryrun && !root)
root = nps[depth+1];
}
if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
pr_err("Error %d processing FDT\n", offset);
return -EINVAL;
}
/*
* Reverse the child list. Some drivers assume node order matches the
* .dts node order.
*/
if (!dryrun)
reverse_nodes(root);
return mem - base;
}
/**
* __unflatten_device_tree - create tree of device_nodes from flat blob
*
* unflattens a device-tree, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used.
* @blob: The blob to expand
* @dad: Parent device node
* @mynodes: The device_node tree created by the call
* @dt_alloc: An allocator that provides a virtual address to memory
* for the resulting tree
* @detached: if true set OF_DETACHED on @mynodes
*
* Returns NULL on failure or the memory chunk containing the unflattened
* device tree on success.
*/
void *__unflatten_device_tree(const void *blob,
struct device_node *dad,
struct device_node **mynodes,
void *(*dt_alloc)(u64 size, u64 align),
bool detached)
{
int size;
void *mem;
pr_debug(" -> unflatten_device_tree()\n");
if (!blob) {
pr_debug("No device tree pointer\n");
return NULL;
}
pr_debug("Unflattening device tree:\n");
pr_debug("magic: %08x\n", fdt_magic(blob));
pr_debug("size: %08x\n", fdt_totalsize(blob));
pr_debug("version: %08x\n", fdt_version(blob));
if (fdt_check_header(blob)) {
pr_err("Invalid device tree blob header\n");
return NULL;
}
/* First pass, scan for size */
size = unflatten_dt_nodes(blob, NULL, dad, NULL);
if (size < 0)
return NULL;
size = ALIGN(size, 4);
pr_debug(" size is %d, allocating...\n", size);
/* Allocate memory for the expanded device tree */
mem = dt_alloc(size + 4, __alignof__(struct device_node));
if (!mem)
return NULL;
memset(mem, 0, size);
*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
pr_debug(" unflattening %p...\n", mem);
/* Second pass, do actual unflattening */
unflatten_dt_nodes(blob, mem, dad, mynodes);
if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warn("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
if (detached && mynodes) {
of_node_set_flag(*mynodes, OF_DETACHED);
pr_debug("unflattened tree is detached\n");
}
pr_debug(" <- unflatten_device_tree()\n");
return mem;
}
static void *kernel_tree_alloc(u64 size, u64 align)
{
return kzalloc(size, GFP_KERNEL);
}
static DEFINE_MUTEX(of_fdt_unflatten_mutex);
/**
* of_fdt_unflatten_tree - create tree of device_nodes from flat blob
* @blob: Flat device tree blob
* @dad: Parent device node
* @mynodes: The device tree created by the call
*
* unflattens the device-tree passed by the firmware, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used.
*
* Returns NULL on failure or the memory chunk containing the unflattened
* device tree on success.
*/
void *of_fdt_unflatten_tree(const unsigned long *blob,
struct device_node *dad,
struct device_node **mynodes)
{
void *mem;
mutex_lock(&of_fdt_unflatten_mutex);
mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
true);
mutex_unlock(&of_fdt_unflatten_mutex);
return mem;
}
EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
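/*
* Illustrative sketch (hypothetical caller, not part of this file): code
* holding a sub-tree blob, e.g. received from firmware on hotplug, could
* unflatten it as below; only the of_fdt_unflatten_tree() signature above
* is assumed.
*
*	struct device_node *nodes = NULL;
*	void *mem;
*
*	mem = of_fdt_unflatten_tree(blob, parent, &nodes);
*	if (!mem || !nodes)
*		return -EINVAL;
*
* The resulting @nodes tree is marked OF_DETACHED; callers typically attach
* it to the live tree (via the dynamic OF support) before walking it with
* the normal of_* helpers.
*/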
/* Everything below here references initial_boot_params directly. */
int __initdata dt_root_addr_cells;
int __initdata dt_root_size_cells;
void *initial_boot_params __ro_after_init;
#ifdef CONFIG_OF_EARLY_FLATTREE
static u32 of_fdt_crc32;
/**
* __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
*/
static int __init __reserved_mem_reserve_reg(unsigned long node,
const char *uname)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t base, size;
int len;
const __be32 *prop;
int first = 1;
bool nomap;
prop = of_get_flat_dt_prop(node, "reg", &len);
if (!prop)
return -ENOENT;
if (len && len % t_len != 0) {
pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
uname);
return -EINVAL;
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
while (len >= t_len) {
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
else
pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
len -= t_len;
if (first) {
fdt_reserved_mem_save_node(node, uname, base, size);
first = 0;
}
}
return 0;
}
/**
* __reserved_mem_check_root() - check that the #size-cells and #address-cells
* provided in /reserved-memory match the values used by the root node, and
* that the 'ranges' property has been provided
*/
static int __init __reserved_mem_check_root(unsigned long node)
{
const __be32 *prop;
prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
return -EINVAL;
prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
return -EINVAL;
prop = of_get_flat_dt_prop(node, "ranges", NULL);
if (!prop)
return -EINVAL;
return 0;
}
/**
* __fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
*/
static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
int depth, void *data)
{
static int found;
int err;
if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
if (__reserved_mem_check_root(node) != 0) {
pr_err("Reserved memory: unsupported node format, ignoring\n");
/* break scan */
return 1;
}
found = 1;
/* scan next node */
return 0;
} else if (!found) {
/* scan next node */
return 0;
} else if (found && depth < 2) {
/* scanning of /reserved-memory has been finished */
return 1;
}
if (!of_fdt_device_is_available(initial_boot_params, node))
return 0;
err = __reserved_mem_reserve_reg(node, uname);
if (err == -ENOENT && of_get_flat_dt_prop(node, "size", NULL))
fdt_reserved_mem_save_node(node, uname, 0, 0);
/* scan next node */
return 0;
}
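/*
* Illustrative /reserved-memory fragment (hypothetical node) of the form the
* scanner above handles; the cell counts must match the root node, as
* enforced by __reserved_mem_check_root():
*
*	reserved-memory {
*		#address-cells = <2>;
*		#size-cells = <2>;
*		ranges;
*
*		framebuffer@9d000000 {
*			reg = <0x0 0x9d000000 0x0 0x800000>;
*			no-map;
*		};
*	};
*
* The 'reg' region is reserved through early_init_dt_reserve_memory_arch()
* and, because of 'no-map', is also marked nomap in memblock.
*/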
/**
* early_init_fdt_scan_reserved_mem() - create reserved memory regions
*
* This function grabs memory from early allocator for device exclusive use
* defined in device tree structures. It should be called by arch specific code
* once the early allocator (i.e. memblock) has been fully activated.
*/
void __init early_init_fdt_scan_reserved_mem(void)
{
int n;
u64 base, size;
if (!initial_boot_params)
return;
/* Process header /memreserve/ fields */
for (n = 0; ; n++) {
fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
if (!size)
break;
early_init_dt_reserve_memory_arch(base, size, false);
}
of_scan_flat_dt(__fdt_scan_reserved_mem, NULL);
fdt_init_reserved_mem();
}
/**
* early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
*/
void __init early_init_fdt_reserve_self(void)
{
if (!initial_boot_params)
return;
/* Reserve the dtb region */
early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
fdt_totalsize(initial_boot_params),
false);
}
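/*
* Illustrative ordering (a sketch, not taken from any particular
* architecture) of how early arch setup code is expected to use the helpers
* above, so that reservations operate on a verified blob with memblock ready:
*
*	if (!early_init_dt_verify(fdt_virt))	(fdt_virt: mapped blob, hypothetical)
*		panic("invalid device tree");
*	early_init_dt_scan_nodes();		(adds /memory regions to memblock)
*	early_init_fdt_reserve_self();		(keeps the blob itself)
*	early_init_fdt_scan_reserved_mem();	(handles /memreserve/ and /reserved-memory)
*/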
/**
* of_scan_flat_dt - scan flattened tree blob and call callback on each.
* @it: callback function
* @data: context data pointer
*
* This function is used to scan the flattened device-tree; it is
* used to extract information (such as the memory layout) at boot,
* before the tree can be unflattened.
*/
int __init of_scan_flat_dt(int (*it)(unsigned long node,
const char *uname, int depth,
void *data),
void *data)
{
const void *blob = initial_boot_params;
const char *pathp;
int offset, rc = 0, depth = -1;
if (!blob)
return 0;
for (offset = fdt_next_node(blob, -1, &depth);
offset >= 0 && depth >= 0 && !rc;
offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
rc = it(offset, pathp, depth, data);
}
return rc;
}
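/*
* Illustrative sketch (hypothetical callback and property name): a minimal
* of_scan_flat_dt() iterator that records the offset of the first node
* carrying a given property, using of_get_flat_dt_prop() defined below.
* Returning a non-zero value stops the scan.
*
*	static int __init find_marker(unsigned long node, const char *uname,
*				      int depth, void *data)
*	{
*		if (!of_get_flat_dt_prop(node, "example,marker", NULL))
*			return 0;
*		*(unsigned long *)data = node;
*		return 1;
*	}
*
*	unsigned long found = 0;
*	of_scan_flat_dt(find_marker, &found);
*/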
/**
* of_scan_flat_dt_subnodes - scan sub-nodes of a node and call a callback on each
* @parent: offset of the parent node
* @it: callback function
* @data: context data pointer
*
* This function is used to scan sub-nodes of a node.
*/
int __init of_scan_flat_dt_subnodes(unsigned long parent,
int (*it)(unsigned long node,
const char *uname,
void *data),
void *data)
{
const void *blob = initial_boot_params;
int node;
fdt_for_each_subnode(node, blob, parent) {
const char *pathp;
int rc;
pathp = fdt_get_name(blob, node, NULL);
rc = it(node, pathp, data);
if (rc)
return rc;
}
return 0;
}
/**
* of_get_flat_dt_subnode_by_name - get the subnode by given name
*
* @node: the parent node
* @uname: the name of subnode
* @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
*/
int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
{
return fdt_subnode_offset(initial_boot_params, node, uname);
}
/**
* of_get_flat_dt_root - find the root node in the flat blob
*/
unsigned long __init of_get_flat_dt_root(void)
{
return 0;
}
/**
* of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
*
* This function can be used within an of_scan_flat_dt() callback to get
* access to properties.
*/
const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
int *size)
{
return fdt_getprop(initial_boot_params, node, name, size);
}
/**
* of_fdt_is_compatible - Return true if given node from the given blob has
* compat in its compatible list
* @blob: A device tree blob
* @node: node to test
* @compat: compatible string to compare with compatible list.
*
* On match, returns a non-zero value with smaller values returned for more
* specific compatible values.
*/
static int of_fdt_is_compatible(const void *blob,
unsigned long node, const char *compat)
{
const char *cp;
int cplen;
unsigned long l, score = 0;
cp = fdt_getprop(blob, node, "compatible", &cplen);
if (cp == NULL)
return 0;
while (cplen > 0) {
score++;
if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
return score;
l = strlen(cp) + 1;
cp += l;
cplen -= l;
}
return 0;
}
/**
* of_flat_dt_is_compatible - Return true if given node has compat in compatible list
* @node: node to test
* @compat: compatible string to compare with compatible list.
*/
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
return of_fdt_is_compatible(initial_boot_params, node, compat);
}
/**
* of_flat_dt_match - Return true if node matches a list of compatible values
*/
static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
{
unsigned int tmp, score = 0;
if (!compat)
return 0;
while (*compat) {
tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
if (tmp && (score == 0 || (tmp < score)))
score = tmp;
compat++;
}
return score;
}
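/*
* Worked example (hypothetical strings): for a node with
* compatible = "acme,board", "acme,soc", of_fdt_is_compatible() returns 1
* for "acme,board" and 2 for "acme,soc", so a smaller non-zero score means
* a more specific match; of_flat_dt_match() keeps the lowest non-zero score
* across its whole @compat list.
*/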
/**
* of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
*/
uint32_t __init of_get_flat_dt_phandle(unsigned long node)
{
return fdt_get_phandle(initial_boot_params, node);
}
struct fdt_scan_status {
const char *name;
int namelen;
int depth;
int found;
int (*iterator)(unsigned long node, const char *uname, int depth, void *data);
void *data;
};
const char * __init of_flat_dt_get_machine_name(void)
{
const char *name;
unsigned long dt_root = of_get_flat_dt_root();
name = of_get_flat_dt_prop(dt_root, "model", NULL);
if (!name)
name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
return name;
}
/**
* of_flat_dt_match_machine - Iterate match tables to find matching machine.
*
* @default_match: A machine specific ptr to return in case of no match.
* @get_next_compat: callback function to return next compatible match table.
*
* Iterate through machine match tables to find the best match for the machine
* compatible string in the FDT.
*/
const void * __init of_flat_dt_match_machine(const void *default_match,
const void * (*get_next_compat)(const char * const**))
{
const void *data = NULL;
const void *best_data = default_match;
const char *const *compat;
unsigned long dt_root;
unsigned int best_score = ~1, score = 0;
dt_root = of_get_flat_dt_root();
while ((data = get_next_compat(&compat))) {
score = of_flat_dt_match(dt_root, compat);
if (score > 0 && score < best_score) {
best_data = data;
best_score = score;
}
}
if (!best_data) {
const char *prop;
int size;
pr_err("\n unrecognized device tree list:\n[ ");
prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
if (prop) {
while (size > 0) {
printk("'%s' ", prop);
size -= strlen(prop) + 1;
prop += strlen(prop) + 1;
}
}
printk("]\n\n");
return NULL;
}
pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
return best_data;
}
#ifdef CONFIG_BLK_DEV_INITRD
static void __early_init_dt_declare_initrd(unsigned long start,
unsigned long end)
{
/* ARM64 would cause a BUG to occur here when CONFIG_DEBUG_VM is
* enabled since __va() is called too early. ARM64 does make use
* of phys_initrd_start/phys_initrd_size so we can skip this
* conversion.
*/
if (!IS_ENABLED(CONFIG_ARM64)) {
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
initrd_below_start_ok = 1;
}
}
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
* @node: reference to node containing initrd location ('chosen')
*/
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
u64 start, end;
int len;
const __be32 *prop;
pr_debug("Looking for initrd properties... ");
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
if (!prop)
return;
start = of_read_number(prop, len/4);
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
if (!prop)
return;
end = of_read_number(prop, len/4);
__early_init_dt_declare_initrd(start, end);
phys_initrd_start = start;
phys_initrd_size = end - start;
pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
(unsigned long long)start, (unsigned long long)end);
}
#else
static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
#ifdef CONFIG_SERIAL_EARLYCON
int __init early_init_dt_scan_chosen_stdout(void)
{
int offset;
const char *p, *q, *options = NULL;
int l;
const struct earlycon_id *match;
const void *fdt = initial_boot_params;
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0)
offset = fdt_path_offset(fdt, "/chosen@0");
if (offset < 0)
return -ENOENT;
p = fdt_getprop(fdt, offset, "stdout-path", &l);
if (!p)
p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
if (!p || !l)
return -ENOENT;
q = strchrnul(p, ':');
if (*q != '\0')
options = q + 1;
l = q - p;
/* Get the node specified by stdout-path */
offset = fdt_path_offset_namelen(fdt, p, l);
if (offset < 0) {
pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
return 0;
}
for (match = __earlycon_table; match < __earlycon_table_end; match++) {
if (!match->compatible[0])
continue;
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
if (of_setup_earlycon(match, offset, options) == 0)
return 0;
}
return -ENODEV;
}
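/*
* Example (hypothetical property): with
*
*	stdout-path = "/soc/serial@10000000:115200n8";
*
* the code above splits at the ':', resolves /soc/serial@10000000 in the
* blob, and passes "115200n8" to of_setup_earlycon() as the options string.
*/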
#endif
/**
* early_init_dt_scan_root - fetch the top level address and size cells
*/
int __init early_init_dt_scan_root(unsigned long node, const char *uname,
int depth, void *data)
{
const __be32 *prop;
if (depth != 0)
return 0;
dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
if (prop)
dt_root_size_cells = be32_to_cpup(prop);
pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
if (prop)
dt_root_addr_cells = be32_to_cpup(prop);
pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
/* break now */
return 1;
}
u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
{
const __be32 *p = *cellp;
*cellp = p + s;
return of_read_number(p, s);
}
/**
* early_init_dt_scan_memory - Look for and parse memory nodes
*/
int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data)
{
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
const __be32 *reg, *endp;
int l;
bool hotpluggable;
/* We are scanning "memory" nodes only */
if (type == NULL || strcmp(type, "memory") != 0)
return 0;
reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
if (reg == NULL)
reg = of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL)
return 0;
endp = reg + (l / sizeof(__be32));
hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
pr_debug("memory scan node %s, reg size %d,\n", uname, l);
while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
u64 base, size;
base = dt_mem_next_cell(dt_root_addr_cells, &reg);
size = dt_mem_next_cell(dt_root_size_cells, &reg);
if (size == 0)
continue;
pr_debug(" - %llx , %llx\n", (unsigned long long)base,
(unsigned long long)size);
early_init_dt_add_memory_arch(base, size);
if (!hotpluggable)
continue;
if (early_init_dt_mark_hotplug_memory_arch(base, size))
pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
base, base + size);
}
return 0;
}
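/*
* Example (hypothetical node): with dt_root_addr_cells = 2 and
* dt_root_size_cells = 2, a node such as
*
*	memory@80000000 {
*		device_type = "memory";
*		reg = <0x0 0x80000000 0x0 0x40000000>;
*	};
*
* is parsed above as a single region with base 0x80000000 and size 1 GiB,
* which is handed to early_init_dt_add_memory_arch().
*/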
int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data)
{
int l;
const char *p;
const void *rng_seed;
pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
if (depth != 1 || !data ||
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
return 0;
early_init_dt_check_for_initrd(node);
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
strlcpy(data, p, min(l, COMMAND_LINE_SIZE));
/*
* CONFIG_CMDLINE is meant to be a default in case nothing else
* managed to set the command line, unless CONFIG_CMDLINE_FORCE
* is set in which case we override whatever was found earlier.
*/
#ifdef CONFIG_CMDLINE
#if defined(CONFIG_CMDLINE_EXTEND)
strlcat(data, " ", COMMAND_LINE_SIZE);
strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
/* No arguments from boot loader, use kernel's cmdline */
if (!((char *)data)[0])
strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */
pr_debug("Command line is: %s\n", (char *)data);
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
add_bootloader_randomness(rng_seed, l);
/* try to clear seed so it won't be found. */
fdt_nop_property(initial_boot_params, node, "rng-seed");
/* update CRC check value */
of_fdt_crc32 = crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params));
}
/* break now */
return 1;
}
#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
#endif
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
const u64 phys_offset = MIN_MEMBLOCK_ADDR;
if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
base, base + size);
return;
}
if (!PAGE_ALIGNED(base)) {
size -= PAGE_SIZE - (base & ~PAGE_MASK);
base = PAGE_ALIGN(base);
}
size &= PAGE_MASK;
if (base > MAX_MEMBLOCK_ADDR) {
pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
base, base + size);
return;
}
if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
size = MAX_MEMBLOCK_ADDR - base + 1;
}
if (base + size < phys_offset) {
pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
base, base + size);
return;
}
if (base < phys_offset) {
pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
base, phys_offset);
size -= phys_offset - base;
base = phys_offset;
}
memblock_add(base, size);
}
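/*
* Worked example (hypothetical addresses): with MIN_MEMBLOCK_ADDR at
* 0x40000000, a DT region with base 0x3ff00000 and size 0x200000 passes the
* earlier checks and is trimmed by the last branch above to base 0x40000000,
* size 0x100000 before memblock_add().
*/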
int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
{
return memblock_mark_hotplug(base, size);
}
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
if (nomap) {
/*
* If the memory is already reserved (by another region), we
* should not allow it to be marked nomap.
*/
if (memblock_is_region_reserved(base, size))
return -EBUSY;
return memblock_mark_nomap(base, size);
}
return memblock_reserve(base, size);
}
static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
void *ptr = memblock_alloc(size, align);
if (!ptr)
panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
__func__, size, align);
return ptr;
}
bool __init early_init_dt_verify(void *params)
{
if (!params)
return false;
/* check device tree validity */
if (fdt_check_header(params))
return false;
/* Setup flat device-tree pointer */
initial_boot_params = params;
of_fdt_crc32 = crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params));
return true;
}
void __init early_init_dt_scan_nodes(void)
{
int rc = 0;
/* Retrieve various information from the /chosen node */
rc = of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
if (!rc)
pr_warn("No chosen node found, continuing without\n");
/* Initialize {size,address}-cells info */
of_scan_flat_dt(early_init_dt_scan_root, NULL);
/* Setup memory, calling early_init_dt_add_memory_arch */
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}
bool __init early_init_dt_scan(void *params)
{
bool status;
status = early_init_dt_verify(params);
if (!status)
return false;
early_init_dt_scan_nodes();
return true;
}
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
* unflattens the device-tree passed by the firmware, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used.
*/
void __init unflatten_device_tree(void)
{
__unflatten_device_tree(initial_boot_params, NULL, &of_root,
early_init_dt_alloc_memory_arch, false);
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
of_alias_scan(early_init_dt_alloc_memory_arch);
unittest_unflatten_overlay_base();
}
/**
* unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
*
* Copies and unflattens the device-tree passed by the firmware, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used. This should only be used when the FDT memory has not been
* reserved, as is the case when the FDT is built into the kernel init
* section. If the FDT memory is already reserved then unflatten_device_tree()
* should be used instead.
*/
void __init unflatten_and_copy_device_tree(void)
{
int size;
void *dt;
if (!initial_boot_params) {
pr_warn("No valid device tree found, continuing without\n");
return;
}
size = fdt_totalsize(initial_boot_params);
dt = early_init_dt_alloc_memory_arch(size,
roundup_pow_of_two(FDT_V17_SIZE));
if (dt) {
memcpy(dt, initial_boot_params, size);
initial_boot_params = dt;
}
unflatten_device_tree();
}
#ifdef CONFIG_SYSFS
static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
memcpy(buf, initial_boot_params + off, count);
return count;
}
static int __init of_fdt_raw_init(void)
{
static struct bin_attribute of_fdt_raw_attr =
__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
if (!initial_boot_params)
return 0;
if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params))) {
pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
return 0;
}
of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
}
late_initcall(of_fdt_raw_init);
#endif
#endif /* CONFIG_OF_EARLY_FLATTREE */