memblock: Kill MEMBLOCK_ERROR
Commit 25818f0f28 (memblock: Make MEMBLOCK_ERROR be 0) thankfully made
MEMBLOCK_ERROR 0, and there is already code that expects the error return
to be 0. There's no point in keeping MEMBLOCK_ERROR around. End its
misery.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310457490-3356-6-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
commit 1f5026a7e2
parent 348968eb15
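For readers skimming the diff, every hunk converges on the same pattern: memblock's range-finding and allocation helpers now report failure by returning 0, so callers simply test the result. Below is a minimal, self-contained sketch of that convention, not kernel code: find_free_range() and its toy free-range table are made-up stand-ins for memblock_find_in_range() and the memblock arrays. The key assumption, which also holds for memblock, is that physical address 0 is never a valid allocation, so 0 can double as the error value.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/*
 * Hypothetical stand-in for memblock_find_in_range(): return the base of a
 * free range of at least @size bytes inside [@start, @end), or 0 if nothing
 * fits.  0 is safe as the failure value because this kind of boot-time
 * allocator never hands out physical address 0.
 */
static phys_addr_t find_free_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size)
{
        /* Toy "free memory" table standing in for the real memblock arrays. */
        static const struct { phys_addr_t base, len; } free_ranges[] = {
                { 0x00100000, 0x00300000 },     /* 3 MB starting at 1 MB  */
                { 0x04000000, 0x00100000 },     /* 1 MB starting at 64 MB */
        };
        size_t i;

        for (i = 0; i < sizeof(free_ranges) / sizeof(free_ranges[0]); i++) {
                phys_addr_t base = free_ranges[i].base;

                if (base >= start && size <= free_ranges[i].len &&
                    base + size <= end)
                        return base;
        }
        return 0;               /* what used to be spelled MEMBLOCK_ERROR */
}

int main(void)
{
        /* Caller convention after this patch: test the result directly. */
        phys_addr_t addr = find_free_range(0x00100000, 0x08000000, 0x00200000);

        if (!addr) {            /* instead of: if (addr == MEMBLOCK_ERROR) */
                fprintf(stderr, "Cannot allocate range\n");
                return 1;
        }
        printf("range found at %#llx\n", (unsigned long long)addr);
        return 0;
}

Compare the if (!addr) test here with the if (addr == MEMBLOCK_ERROR) checks removed throughout the diff below: with MEMBLOCK_ERROR defined as 0, the two are already equivalent, which is why the macro can simply go away.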
@@ -88,7 +88,7 @@ static u32 __init allocate_aperture(void)
          */
         addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
                                       aper_size, aper_size);
-        if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) {
+        if (!addr || addr + aper_size > GART_MAX_ADDR) {
                 printk(KERN_ERR
                         "Cannot allocate aperture memory hole (%lx,%uK)\n",
                                 addr, aper_size>>10);
@@ -86,7 +86,7 @@ void __init setup_bios_corruption_check(void)
                 u64 size;
                 addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE);

-                if (addr == MEMBLOCK_ERROR)
+                if (!addr)
                         break;

                 if (addr >= corruption_check_size)
@@ -745,7 +745,7 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)

         for (start = startt; ; start += size) {
                 start = memblock_x86_find_in_range_size(start, &size, align);
-                if (start == MEMBLOCK_ERROR)
+                if (!start)
                         return 0;
                 if (size >= sizet)
                         break;
@@ -331,7 +331,7 @@ static void __init relocate_initrd(void)
         ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size,
                                               PAGE_SIZE);

-        if (ramdisk_here == MEMBLOCK_ERROR)
+        if (!ramdisk_here)
                 panic("Cannot find place for new RAMDISK of size %lld\n",
                       ramdisk_size);

@@ -554,7 +554,7 @@ static void __init reserve_crashkernel(void)
                 crash_base = memblock_find_in_range(alignment,
                                CRASH_KERNEL_ADDR_MAX, crash_size, alignment);

-                if (crash_base == MEMBLOCK_ERROR) {
+                if (!crash_base) {
                         pr_info("crashkernel reservation failed - No suitable area found.\n");
                         return;
                 }
@@ -14,7 +14,7 @@ void __init setup_trampolines(void)

         /* Has to be in very low memory so we can execute real-mode AP code. */
         mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-        if (mem == MEMBLOCK_ERROR)
+        if (!mem)
                 panic("Cannot allocate trampoline\n");

         x86_trampoline_base = __va(mem);
@@ -68,7 +68,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #endif

         base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
-        if (base == MEMBLOCK_ERROR)
+        if (!base)
                 panic("Cannot find space for the kernel page tables");

         pgt_buf_start = base >> PAGE_SHIFT;
@@ -66,7 +66,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
                         return addr;
         }

-        return MEMBLOCK_ERROR;
+        return 0;
 }

 static __init struct range *find_range_array(int count)
@@ -78,7 +78,7 @@ static __init struct range *find_range_array(int count)
         end = memblock.current_limit;

         mem = memblock_find_in_range(0, end, size, sizeof(struct range));
-        if (mem == MEMBLOCK_ERROR)
+        if (!mem)
                 panic("can not find more space for range array");

         /*
@@ -274,7 +274,7 @@ u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size
 {
         u64 addr;
         addr = find_memory_core_early(nid, size, align, start, end);
-        if (addr != MEMBLOCK_ERROR)
+        if (addr)
                 return addr;

         /* Fallback, should already have start end within node range */
@@ -226,10 +226,10 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
         } else {
                 nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
                                                 nd_size, SMP_CACHE_BYTES);
-                if (nd_pa == MEMBLOCK_ERROR)
+                if (!nd_pa)
                         nd_pa = memblock_find_in_range(nd_low, nd_high,
                                                 nd_size, SMP_CACHE_BYTES);
-                if (nd_pa == MEMBLOCK_ERROR) {
+                if (!nd_pa) {
                         pr_err("Cannot find %zu bytes in node %d\n",
                                nd_size, nid);
                         return;
@@ -395,7 +395,7 @@ static int __init numa_alloc_distance(void)

         phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                       size, PAGE_SIZE);
-        if (phys == MEMBLOCK_ERROR) {
+        if (!phys) {
                 pr_warning("NUMA: Warning: can't allocate distance table!\n");
                 /* don't retry until explicitly reset */
                 numa_distance = (void *)1LU;
@@ -199,7 +199,7 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)

         /* allocate node memory and the lowmem remap area */
         node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
-        if (node_pa == MEMBLOCK_ERROR) {
+        if (!node_pa) {
                 pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
                            size, nid);
                 return;
@@ -209,7 +209,7 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
         remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
                                           max_low_pfn << PAGE_SHIFT,
                                           size, LARGE_PAGE_BYTES);
-        if (remap_pa == MEMBLOCK_ERROR) {
+        if (!remap_pa) {
                 pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
                            size, nid);
                 memblock_x86_free_range(node_pa, node_pa + size);
@@ -351,7 +351,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)

         phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                       phys_size, PAGE_SIZE);
-        if (phys == MEMBLOCK_ERROR) {
+        if (!phys) {
                 pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
                 goto no_emu;
         }
@@ -2,8 +2,6 @@
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__

-#define MEMBLOCK_ERROR 0
-
 #ifdef CONFIG_HAVE_MEMBLOCK
 /*
  * Logical memory blocks.
@@ -164,7 +162,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
-        return MEMBLOCK_ERROR;
+        return 0;
 }

 #endif /* CONFIG_HAVE_MEMBLOCK */
@@ -199,7 +199,7 @@ void __init setup_log_buf(int early)
                 unsigned long mem;

                 mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
-                if (mem == MEMBLOCK_ERROR)
+                if (!mem)
                         return;
                 new_log_buf = __va(mem);
         } else {
@@ -74,7 +74,7 @@ static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_

         /* In case, huge size is requested */
         if (end < size)
-                return MEMBLOCK_ERROR;
+                return 0;

         base = round_down(end - size, align);

@@ -94,7 +94,7 @@ static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_
                 base = round_down(res_base - size, align);
         }

-        return MEMBLOCK_ERROR;
+        return 0;
 }

 static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
@@ -126,10 +126,10 @@ static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
                 if (bottom >= top)
                         continue;
                 found = memblock_find_region(bottom, top, size, align);
-                if (found != MEMBLOCK_ERROR)
+                if (found)
                         return found;
         }
-        return MEMBLOCK_ERROR;
+        return 0;
 }

 /*
@@ -214,10 +214,10 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
          */
         if (use_slab) {
                 new_array = kmalloc(new_size, GFP_KERNEL);
-                addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
+                addr = new_array ? __pa(new_array) : 0;
         } else
                 addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
-        if (addr == MEMBLOCK_ERROR) {
+        if (!addr) {
                 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                        memblock_type_name(type), type->max, type->max * 2);
                 return -1;
@@ -478,8 +478,7 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
         size = round_up(size, align);

         found = memblock_find_base(size, align, 0, max_addr);
-        if (found != MEMBLOCK_ERROR &&
-            !memblock_add_region(&memblock.reserved, found, size))
+        if (found && !memblock_add_region(&memblock.reserved, found, size))
                 return found;

         return 0;
@@ -559,14 +558,14 @@ static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
                 this_end = memblock_nid_range(start, end, &this_nid);
                 if (this_nid == nid) {
                         phys_addr_t ret = memblock_find_region(start, this_end, size, align);
-                        if (ret != MEMBLOCK_ERROR &&
+                        if (ret &&
                             !memblock_add_region(&memblock.reserved, ret, size))
                                 return ret;
                 }
                 start = this_end;
         }

-        return MEMBLOCK_ERROR;
+        return 0;
 }

 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
@@ -588,7 +587,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
         for (i = 0; i < mem->cnt; i++) {
                 phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
                                                size, align, nid);
-                if (ret != MEMBLOCK_ERROR)
+                if (ret)
                         return ret;
         }

@@ -43,7 +43,7 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,

         addr = find_memory_core_early(nid, size, align, goal, limit);

-        if (addr == MEMBLOCK_ERROR)
+        if (!addr)
                 return NULL;

         ptr = phys_to_virt(addr);
@@ -3878,13 +3878,13 @@ u64 __init find_memory_core_early(int nid, u64 size, u64 align,

                 addr = memblock_find_in_range(final_start, final_end, size, align);

-                if (addr == MEMBLOCK_ERROR)
+                if (!addr)
                         continue;

                 return addr;
         }

-        return MEMBLOCK_ERROR;
+        return 0;
 }
 #endif
