percpu: fix first chunk match in per_cpu_ptr_to_phys()
per_cpu_ptr_to_phys() determines whether the passed in @addr belongs to the first_chunk or not by just matching the address against the address range of the base unit (unit0, used by cpu0). When an address from another cpu was passed in, it will always determine that the address doesn't belong to the first chunk even when it does. This makes the function return a bogus physical address which may lead to a crash. This problem was discovered by Cliff Wickman while investigating a crash during kdump on an SGI UV system. Signed-off-by: Tejun Heo <tj@kernel.org> Reported-by: Cliff Wickman <cpw@sgi.com> Tested-by: Cliff Wickman <cpw@sgi.com> Cc: stable@kernel.org
This commit is contained in:
parent
a92d3ff9e5
commit
9983b6f0cf
31
mm/percpu.c
31
mm/percpu.c
|
@@ -229,8 +229,8 @@ static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
|
|||
return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
|
||||
}
|
||||
|
||||
static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk,
|
||||
unsigned int cpu, int page_idx)
|
||||
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
|
||||
unsigned int cpu, int page_idx)
|
||||
{
|
||||
return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
|
||||
(page_idx << PAGE_SHIFT);
|
||||
|
@@ -978,7 +978,32 @@ bool is_kernel_percpu_address(unsigned long addr)
|
|||
*/
|
||||
phys_addr_t per_cpu_ptr_to_phys(void *addr)
|
||||
{
|
||||
if (pcpu_addr_in_first_chunk(addr)) {
|
||||
void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
|
||||
bool in_first_chunk = false;
|
||||
unsigned long first_start, first_end;
|
||||
unsigned int cpu;
|
||||
|
||||
/*
|
||||
* The following test on first_start/end isn't strictly
|
||||
* necessary but will speed up lookups of addresses which
|
||||
* aren't in the first chunk.
|
||||
*/
|
||||
first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
|
||||
first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
|
||||
pcpu_unit_pages);
|
||||
if ((unsigned long)addr >= first_start &&
|
||||
(unsigned long)addr < first_end) {
|
||||
for_each_possible_cpu(cpu) {
|
||||
void *start = per_cpu_ptr(base, cpu);
|
||||
|
||||
if (addr >= start && addr < start + pcpu_unit_size) {
|
||||
in_first_chunk = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (in_first_chunk) {
|
||||
if ((unsigned long)addr < VMALLOC_START ||
|
||||
(unsigned long)addr >= VMALLOC_END)
|
||||
return __pa(addr);
|
||||
|
|
Loading…
Reference in New Issue