s390/mem_detect: limit memory detection loop to "mem=" parameter
The current memory detection loop detects all memory present on a machine. This is true even if the user specified the "mem=" parameter on the kernel command line. This can be a problem, since the memory detection may cause a fully populated host page table for the guest, even for those parts of memory that the guest will never use afterwards. Fix this by detecting memory only up to a user-supplied "mem=" limit, if one is specified.

Reported-by: Michael Johanssen <johanssn@de.ibm.com>
Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit df1bd59c5c (parent 67b5c3eeb4)
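Before the diff, a minimal standalone sketch of the behaviour the patch adds. This is illustrative userspace C, not the kernel code; the helper name clamp_chunk and the sample values are made up for the example. It models the rule that a limit of 0 means "detect everything", while a non-zero limit truncates the last chunk so that no chunk ends above it.

/*
 * Illustrative sketch only, not kernel code: models the clamping that the
 * patch adds to the memory detection loop.  A maxsize of 0 means "no limit";
 * otherwise the last chunk is truncated so no chunk ends above maxsize.
 */
#include <stdio.h>

struct chunk {
	unsigned long addr;
	unsigned long size;
};

/* Return 0 if the chunk lies entirely above the limit, 1 if it is kept. */
static int clamp_chunk(struct chunk *c, unsigned long maxsize)
{
	if (!maxsize)
		return 1;			/* 0 == unlimited */
	if (c->addr >= maxsize)
		return 0;			/* beyond the limit */
	if (c->addr + c->size > maxsize)
		c->size = maxsize - c->addr;	/* truncate the last chunk */
	return 1;
}

int main(void)
{
	struct chunk c = { 0x40000000UL, 0x40000000UL };	/* 1G chunk at 1G */
	unsigned long maxsize = 0x60000000UL;			/* as if mem=1536M */

	if (clamp_chunk(&c, maxsize))
		printf("chunk: addr=%#lx size=%#lx\n", c.addr, c.size);
	return 0;
}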
@@ -46,7 +46,7 @@ extern struct mem_chunk memory_chunk[];
 extern int memory_end_set;
 extern unsigned long memory_end;
 
-void detect_memory_layout(struct mem_chunk chunk[]);
+void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
 void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
 		     unsigned long size, int type);
 
@@ -88,7 +88,7 @@ static struct mem_chunk *get_memory_layout(void)
 	struct mem_chunk *chunk_array;
 
 	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
-	detect_memory_layout(chunk_array);
+	detect_memory_layout(chunk_array, 0);
 	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK);
 	return chunk_array;
 }
@@ -482,7 +482,6 @@ void __init startup_init(void)
 	detect_machine_facilities();
 	setup_topology();
 	sclp_facilities_detect();
-	detect_memory_layout(memory_chunk);
 #ifdef CONFIG_DYNAMIC_FTRACE
 	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
 #endif
@@ -1066,7 +1066,7 @@ void __init setup_arch(char **cmdline_p)
 	memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
 
 	parse_early_param();
-
+	detect_memory_layout(memory_chunk, memory_end);
 	os_info_init();
 	setup_ipl();
 	reserve_oldmem();
@@ -12,7 +12,7 @@
 
 #define ADDR2G (1ULL << 31)
 
-static void find_memory_chunks(struct mem_chunk chunk[])
+static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize)
 {
 	unsigned long long memsize, rnmax, rzm;
 	unsigned long addr = 0, size;
@@ -27,6 +27,8 @@ static void find_memory_chunks(struct mem_chunk chunk[])
 		rzm = min(ADDR2G, rzm);
 		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
 	}
+	if (maxsize)
+		memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize;
 	do {
 		size = 0;
 		type = tprot(addr);
@@ -36,6 +38,8 @@ static void find_memory_chunks(struct mem_chunk chunk[])
 				break;
 		} while (type == tprot(addr + size));
 		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
+			if (memsize && (addr + size > memsize))
+				size = memsize - addr;
 			chunk[i].addr = addr;
 			chunk[i].size = size;
 			chunk[i].type = type;
@@ -45,7 +49,20 @@ static void find_memory_chunks(struct mem_chunk chunk[])
 	} while (addr < memsize && i < MEMORY_CHUNKS);
 }
 
-void detect_memory_layout(struct mem_chunk chunk[])
+/**
+ * detect_memory_layout - fill mem_chunk array with memory layout data
+ * @chunk: mem_chunk array to be filled
+ * @maxsize: maximum address where memory detection should stop
+ *
+ * Fills the passed in memory chunk array with the memory layout of the
+ * machine. The array must have a size of at least MEMORY_CHUNKS and will
+ * be fully initialized afterwards.
+ * If the maxsize paramater has a value > 0 memory detection will stop at
+ * that address. It is guaranteed that all chunks have an ending address
+ * that is smaller than maxsize.
+ * If maxsize is 0 all memory will be detected.
+ */
+void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
 {
 	unsigned long flags, flags_dat, cr0;
 
@@ -69,7 +86,7 @@ void detect_memory_layout(struct mem_chunk chunk[])
 	}
 	__ctl_store(cr0, 0, 0);
 	__ctl_clear_bit(0, 28);
-	find_memory_chunks(chunk);
+	find_memory_chunks(chunk, maxsize);
 	__ctl_load(cr0, 0, 0);
 out:
 	__arch_local_irq_ssm(flags_dat);
@@ -426,7 +426,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp)
 			      GFP_KERNEL);
 	if (!chunk_array)
 		return -ENOMEM;
-	detect_memory_layout(chunk_array);
+	detect_memory_layout(chunk_array, 0);
 	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
 	if (!buf) {
 		kfree(chunk_array);
@@ -610,7 +610,7 @@ static int __init get_mem_info(unsigned long *mem, unsigned long *end)
 			      GFP_KERNEL);
 	if (!chunk_array)
 		return -ENOMEM;
-	detect_memory_layout(chunk_array);
+	detect_memory_layout(chunk_array, 0);
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		if (chunk_array[i].size == 0)
 			break;
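For context on the new calling convention (pass 0 for a full scan, or a "mem="-derived limit such as memory_end to stop detection early), here is a simplified, self-contained sketch. The detector body, the 3G/512M figures and the printing below are invented for illustration; only the pass-0-or-limit convention comes from the patch.

/*
 * Simplified illustration of the new calling convention, not kernel code.
 * Callers that want the complete layout pass 0 (zcore, crash_dump style);
 * setup_arch() passes the "mem=" derived limit so detection stops early.
 */
#include <stdio.h>
#include <string.h>

#define MEMORY_CHUNKS 16	/* illustrative value */

struct mem_chunk {
	unsigned long addr;
	unsigned long size;
	int type;
};

/* Stand-in detector: pretend the machine has 3G of memory present. */
static void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long memsize = 0xC0000000UL;		/* 3G present */

	if (maxsize && maxsize < memsize)
		memsize = maxsize;			/* honour the limit */
	memset(chunk, 0, MEMORY_CHUNKS * sizeof(*chunk));
	chunk[0].addr = 0;
	chunk[0].size = memsize;
}

int main(void)
{
	struct mem_chunk chunks[MEMORY_CHUNKS];
	unsigned long memory_end = 512UL << 20;		/* as if "mem=512M" was given */

	detect_memory_layout(chunks, memory_end);	/* setup_arch() style */
	printf("limited:   %#lx bytes\n", chunks[0].size);

	detect_memory_layout(chunks, 0);		/* zcore / crash_dump style */
	printf("unlimited: %#lx bytes\n", chunks[0].size);
	return 0;
}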