powerpc: Rearrange SLB preload code
With the new top down layout it is likely that the pc and stack will be in the same segment, because the pc is most likely in a library allocated via a top down mmap. Right now we bail out early if these segments match. Rearrange the SLB preload code to sanity check that all SLB preload addresses are not in the kernel, then check all addresses for conflicts.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 30d0b36828
commit 5eb9bac040
@@ -218,22 +218,17 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	else
 		unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-	if (is_kernel_addr(pc))
+	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
+	    is_kernel_addr(unmapped_base))
 		return;
+
 	slb_allocate(pc);
 
-	if (esids_match(pc,stack))
-		return;
-
-	if (is_kernel_addr(stack))
-		return;
-	slb_allocate(stack);
+	if (!esids_match(pc, stack))
+		slb_allocate(stack);
 
-	if (esids_match(pc,unmapped_base) || esids_match(stack,unmapped_base))
-		return;
-
-	if (is_kernel_addr(unmapped_base))
-		return;
-	slb_allocate(unmapped_base);
+	if (!esids_match(pc, unmapped_base) &&
+	    !esids_match(stack, unmapped_base))
+		slb_allocate(unmapped_base);
 }
 
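For context, the conflict checks above rely on esids_match(), which reports whether two effective addresses fall in the same segment and would therefore share an SLB entry, making a second preload redundant. The following is a minimal sketch of that idea only; the names and the fixed 256MB segment size are assumptions for illustration, and the in-kernel helper is more involved (it also has to account for larger segment sizes).

/*
 * Illustrative sketch, not the kernel's implementation: two effective
 * addresses land in the same SLB entry when the bits above the segment
 * offset (the ESID) are equal.  Assumes 256MB segments.
 */
#define EXAMPLE_SID_SHIFT	28	/* 256MB segments (assumption) */

static inline int example_esids_match(unsigned long addr1,
				      unsigned long addr2)
{
	return (addr1 >> EXAMPLE_SID_SHIFT) == (addr2 >> EXAMPLE_SID_SHIFT);
}

With the rearranged flow, the kernel-address sanity check covers pc, stack and unmapped_base up front, and each subsequent preload is skipped only when its segment has already been loaded, so pc and stack sharing a segment no longer prevents unmapped_base from being preloaded.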