ia64: add checks for the return value of memblock_alloc*()

Add panic() calls if memblock_alloc*() returns NULL.

Most of the changes are simply addition of

	if(!ptr)
		panic();

statements after the calls to memblock_alloc*() variants.

Exceptions are create_mem_map_page_table() and ia64_log_init() that were
slightly refactored to accommodate the change.
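
As an illustration of the pattern only (a minimal sketch with a hypothetical
boot-time helper, not code taken from this patch):

	void __init example_table_init(void)
	{
		/* early allocation; memblock_alloc() returns NULL on failure */
		void *tbl = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);

		if (!tbl)
			panic("%s: Failed to allocate %lu bytes\n",
			      __func__, PAGE_SIZE);
	}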

Link: http://lkml.kernel.org/r/1548057848-15136-15-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Guo Ren <ren_guo@c-sky.com>				[c-sky]
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Juergen Gross <jgross@suse.com>			[Xen]
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Mike Rapoport, 2019-03-11 23:30:00 -07:00; committed by Linus Torvalds
commit d80db5c1ed (parent 0240dfd5b4)
7 changed files with 74 additions and 17 deletions


@@ -359,11 +359,6 @@ typedef struct ia64_state_log_s
 
 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 
-#define IA64_LOG_ALLOCATE(it, size) \
-	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
-		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \
-	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
-		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);}
 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
 #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
 #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -378,6 +373,19 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
 #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
 
+static inline void ia64_log_allocate(int it, u64 size)
+{
+	ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
+		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+
+	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
+		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+}
+
 /*
  * ia64_log_init
  *	Reset the OS ia64 log buffer
@@ -399,7 +407,7 @@ ia64_log_init(int sal_info_type)
 		return;
 
 	// set up OS data structures to hold error info
-	IA64_LOG_ALLOCATE(sal_info_type, max_size);
+	ia64_log_allocate(sal_info_type, max_size);
 }
 
 /*


@@ -84,9 +84,13 @@ skip:
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
-				       PERCPU_PAGE_SIZE,
+	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
+
+	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
 				       __pa(MAX_DMA_ADDRESS));
+	if (!cpu_data)
+		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
 
 /**


@@ -454,6 +454,10 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 				     __pa(MAX_DMA_ADDRESS),
 				     MEMBLOCK_ALLOC_ACCESSIBLE,
 				     bestnode);
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+		      __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
+		      __pa(MAX_DMA_ADDRESS));
 
 	return ptr;
 }


@@ -444,23 +444,45 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
 
 	for (address = start_page; address < end_page; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
-		if (pgd_none(*pgd))
-			pgd_populate(&init_mm, pgd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pgd_none(*pgd)) {
+			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pud)
+				goto err_alloc;
+			pgd_populate(&init_mm, pgd, pud);
+		}
 		pud = pud_offset(pgd, address);
 
-		if (pud_none(*pud))
-			pud_populate(&init_mm, pud, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pud_none(*pud)) {
+			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pmd)
+				goto err_alloc;
+			pud_populate(&init_mm, pud, pmd);
+		}
 		pmd = pmd_offset(pud, address);
 
-		if (pmd_none(*pmd))
-			pmd_populate_kernel(&init_mm, pmd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pmd_none(*pmd)) {
+			pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pte)
+				goto err_alloc;
+			pmd_populate_kernel(&init_mm, pmd, pte);
+		}
 		pte = pte_offset_kernel(pmd, address);
 
-		if (pte_none(*pte))
-			set_pte(pte, pfn_pte(__pa(memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)) >> PAGE_SHIFT,
+		if (pte_none(*pte)) {
+			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+							 node);
+			if (!page)
+				goto err_alloc;
+			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
 					     PAGE_KERNEL));
+		}
 	}
 	return 0;
+
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE, node);
+	return -ENOMEM;
 }
 
 struct memmap_init_callback_data {


@@ -61,8 +61,14 @@ mmu_context_init (void)
 {
 	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
 					 SMP_CACHE_BYTES);
+	if (!ia64_ctx.bitmap)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      (ia64_ctx.max_ctx + 1) >> 3);
 	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
 					   SMP_CACHE_BYTES);
+	if (!ia64_ctx.flushmap)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      (ia64_ctx.max_ctx + 1) >> 3);
 }
 
 /*


@@ -394,6 +394,9 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
 	hubdev_info = (struct hubdev_info *)memblock_alloc_node(size,
 								 SMP_CACHE_BYTES,
 								 node);
+	if (!hubdev_info)
+		panic("%s: Failed to allocate %d bytes align=0x%x nid=%d\n",
+		      __func__, size, SMP_CACHE_BYTES, node);
 
 	npda->pdinfo = (void *)hubdev_info;
 }


@@ -513,6 +513,10 @@ static void __init sn_init_pdas(char **cmdline_p)
 		nodepdaindr[cnode] =
 			memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES,
 					    cnode);
+		if (!nodepdaindr[cnode])
+			panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+			      __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+			      cnode);
 		memset(nodepdaindr[cnode]->phys_cpuid, -1,
 		       sizeof(nodepdaindr[cnode]->phys_cpuid));
 		spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@@ -521,9 +525,15 @@ static void __init sn_init_pdas(char **cmdline_p)
 	/*
 	 * Allocate & initialize nodepda for TIOs. For now, put them on node 0.
 	 */
-	for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
+	for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
 		nodepdaindr[cnode] =
 			memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0);
+		if (!nodepdaindr[cnode])
+			panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+			      __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+			      cnode);
+	}
 
 	/*
	 * Now copy the array of nodepda pointers to each nodepda.