powerpc/mm: split out early ioremap path.
ioremap does things differently at different levels depending on whether SLAB is available or not. Try to separate the early path from the beginning.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/3acd2dbe04b04f111475e7a59f2b6f2ab9b95ab6.1566309263.git.christophe.leroy@c-s.fr

parent 4a45b7460c
commit 163918fc57
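For readers skimming the diffs below, here is a rough standalone model (plain C, not kernel code) of the early path this commit separates out: before the slab allocator is up, ioremap carves its virtual window out of ioremap_bot and installs translations one page at a time via map_kernel_page(). Only the function names and the control flow mirror the diff; the stub bodies, addresses and printf harness are hypothetical.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long ioremap_bot = 0xff000000UL;

/* stand-in for map_kernel_page(): install a single PTE, may fail */
static int map_kernel_page(unsigned long ea, unsigned long pa)
{
        printf("map va %#lx -> pa %#lx\n", ea, pa);
        return 0;
}

/*
 * Shape of early_ioremap_range() after this commit: a plain per-page
 * loop, with the old slab_is_available() branch removed.
 */
static int early_ioremap_range(unsigned long ea, unsigned long pa,
                               unsigned long size)
{
        unsigned long i;

        for (i = 0; i < size; i += PAGE_SIZE) {
                int err = map_kernel_page(ea + i, pa + i);

                if (err)
                        return err;
        }
        return 0;
}

/*
 * Shape of the 32-bit early branch of __ioremap_caller(): map first,
 * and only commit the ioremap_bot bump once the mapping succeeded.
 */
static void *early_ioremap(unsigned long pa, unsigned long size,
                           unsigned long offset)
{
        if (early_ioremap_range(ioremap_bot - size, pa, size))
                return NULL;
        ioremap_bot -= size;
        return (void *)(ioremap_bot + offset);
}

int main(void)
{
        void *p = early_ioremap(0x80000000UL, 2 * PAGE_SIZE, 0);

        printf("early mapping at %p, ioremap_bot now %#lx\n", p, ioremap_bot);
        return 0;
}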
arch/powerpc/include/asm/io.h
@@ -722,7 +722,8 @@ void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
 
 extern void iounmap(volatile void __iomem *addr);
 
-int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot);
+int early_ioremap_range(unsigned long ea, phys_addr_t pa,
+                        unsigned long size, pgprot_t prot);
 void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
                          pgprot_t prot, void *caller);
 
arch/powerpc/mm/ioremap.c
@@ -59,18 +59,11 @@ void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 }
 EXPORT_SYMBOL(ioremap_prot);
 
-int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot)
+int early_ioremap_range(unsigned long ea, phys_addr_t pa,
+                        unsigned long size, pgprot_t prot)
 {
         unsigned long i;
 
-        if (slab_is_available()) {
-                int err = ioremap_page_range(ea, ea + size, pa, prot);
-
-                if (err)
-                        unmap_kernel_range(ea, size);
-                return err;
-        }
-
         for (i = 0; i < size; i += PAGE_SIZE) {
                 int err = map_kernel_page(ea + i, pa + i, prot);
 
@@ -86,16 +79,20 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
 {
         struct vm_struct *area;
         int ret;
+        unsigned long va;
 
         area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
         if (area == NULL)
                 return NULL;
 
         area->phys_addr = pa;
-        ret = ioremap_range((unsigned long)area->addr, pa, size, prot);
+        va = (unsigned long)area->addr;
+
+        ret = ioremap_page_range(va, va + size, pa, prot);
         if (!ret)
                 return (void __iomem *)area->addr + offset;
 
+        unmap_kernel_range(va, size);
         free_vm_area(area);
 
         return NULL;
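A note on the unmap_kernel_range() line the hunk above adds: the removed ioremap_range() wrapper used to do that unmap on failure itself, so now that do_ioremap() calls ioremap_page_range() directly, a mapping that failed part-way must be torn down here before the vm area is freed. Below is a minimal standalone model of that unwind; the stub functions and the failure point are hypothetical stand-ins for the kernel calls named in the diff.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static int pages_mapped;

/* stand-in for a per-page mapping that fails on the third page */
static int map_one(unsigned long ea)
{
        (void)ea;
        if (pages_mapped == 2)
                return -1;
        pages_mapped++;
        return 0;
}

/* stand-in for ioremap_page_range(): may leave a partial mapping */
static int map_range(unsigned long ea, unsigned long size)
{
        unsigned long i;

        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_one(ea + i))
                        return -1;
        return 0;
}

/* stand-in for unmap_kernel_range(): safe on half-built mappings */
static void unmap_range(unsigned long ea, unsigned long size)
{
        (void)ea;
        (void)size;
        pages_mapped = 0;
}

int main(void)
{
        unsigned long ea = 0xf0000000UL, size = 4 * PAGE_SIZE;

        if (map_range(ea, size)) {
                unmap_range(ea, size);  /* unwind before free_vm_area() */
                printf("mapping failed part-way; unwound to %d pages\n",
                       pages_mapped);
                return 1;
        }
        return 0;
}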
arch/powerpc/mm/ioremap_32.c
@@ -60,24 +60,21 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
          */
         v = p_block_mapped(p);
         if (v)
-                goto out;
+                return (void __iomem *)v + offset;
 
-        if (slab_is_available()) {
+        if (slab_is_available())
                 return do_ioremap(p, offset, size, prot, caller);
-        } else {
-                v = (ioremap_bot -= size);
-        }
 
         /*
          * Should check if it is a candidate for a BAT mapping
          */
 
-        err = ioremap_range((unsigned long)v, p, size, prot);
+        err = early_ioremap_range(ioremap_bot - size, p, size, prot);
         if (err)
                 return NULL;
+        ioremap_bot -= size;
 
-out:
-        return (void __iomem *)(v + ((unsigned long)addr & ~PAGE_MASK));
+        return (void __iomem *)ioremap_bot + offset;
 }
 
 void iounmap(volatile void __iomem *addr)
arch/powerpc/mm/ioremap_64.c
@@ -9,6 +9,9 @@
  */
 void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
 {
+        int ret;
+        unsigned long va = (unsigned long)ea;
+
         /* We don't support the 4K PFN hack with ioremap */
         if (pgprot_val(prot) & H_PAGE_4K_PFN)
                 return NULL;
@@ -22,7 +25,15 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
         WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
         WARN_ON(size & ~PAGE_MASK);
 
-        if (ioremap_range((unsigned long)ea, pa, size, prot))
+        if (slab_is_available()) {
+                ret = ioremap_page_range(va, va + size, pa, prot);
+                if (ret)
+                        unmap_kernel_range(va, size);
+        } else {
+                ret = early_ioremap_range(va, pa, size, prot);
+        }
+
+        if (ret)
                 return NULL;
 
         return (void __iomem *)ea;
@@ -48,6 +59,7 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
 {
         phys_addr_t paligned, offset;
         void __iomem *ret;
+        int err;
 
         /* We don't support the 4K PFN hack with ioremap */
         if (pgprot_val(prot) & H_PAGE_4K_PFN)
@@ -66,16 +78,16 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
         if (size == 0 || paligned == 0)
                 return NULL;
 
-        if (slab_is_available()) {
+        if (slab_is_available())
                 return do_ioremap(paligned, offset, size, prot, caller);
-        } else {
-                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
-                if (ret)
-                        ioremap_bot += size;
-        }
 
-        if (ret)
-                ret += addr & ~PAGE_MASK;
+        err = early_ioremap_range(ioremap_bot, paligned, size, prot);
+        if (err)
+                return NULL;
+
+        ret = (void __iomem *)ioremap_bot + offset;
+        ioremap_bot += size;
+
         return ret;
 }
 
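One asymmetry worth noting between the two early paths: after this commit the 32-bit code (ioremap_32.c above) hands out the early window downwards, decrementing ioremap_bot before returning it, while the 64-bit code above hands it out upwards, returning the old ioremap_bot and then incrementing past it. A tiny standalone model of the two bump allocators; only the directions come from the diff, the start addresses are made up.

#include <stdio.h>

static unsigned long bot32 = 0xff000000UL;      /* 32-bit: grows down */
static unsigned long bot64 = 0xc0000000UL;      /* 64-bit: grows up */

/* 32-bit style: commit the bump, then return the new bottom */
static unsigned long early_alloc32(unsigned long size)
{
        bot32 -= size;
        return bot32;
}

/* 64-bit style: return the old bottom, then bump past it */
static unsigned long early_alloc64(unsigned long size)
{
        unsigned long ea = bot64;

        bot64 += size;
        return ea;
}

int main(void)
{
        printf("32-bit early window: %#lx\n", early_alloc32(0x4000));
        printf("64-bit early window: %#lx\n", early_alloc64(0x4000));
        return 0;
}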