xtensa: support ioremap for memory outside KIO region

Map physical memory outside the KIO region into the vmalloc area and
unmap it with vunmap.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

parent 7d20221989
commit 5bb8def55d
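For context, a minimal illustrative sketch (not part of the commit) of how a driver could now map registers that live outside the KIO window; the example_* functions and EXAMPLE_* constants are hypothetical, and whether a given physical address is outside KIO depends on the core configuration.

#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical device registers at a physical address outside the KIO region. */
#define EXAMPLE_PHYS_BASE	0x90000000UL
#define EXAMPLE_REG_SIZE	0x1000UL

static void __iomem *example_regs;

static int example_map(void)
{
	/* With this change, ioremap_nocache() falls back to
	 * xtensa_ioremap_nocache() for such addresses instead of BUG(). */
	example_regs = ioremap_nocache(EXAMPLE_PHYS_BASE, EXAMPLE_REG_SIZE);
	if (!example_regs)
		return -ENOMEM;

	writel(0x1, example_regs);	/* poke a hypothetical control register */
	return 0;
}

static void example_unmap(void)
{
	/* iounmap() forwards to xtensa_iounmap() only for non-KIO mappings. */
	iounmap(example_regs);
}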
arch/xtensa/include/asm/io.h

@@ -25,9 +25,12 @@
 
 #ifdef CONFIG_MMU
 
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
+void xtensa_iounmap(volatile void __iomem *addr);
+
 /*
  * Return the virtual address for the specified bus memory.
- * Note that we currently don't support any address outside the KIO segment.
  */
 static inline void __iomem *ioremap_nocache(unsigned long offset,
 		unsigned long size)
@@ -36,7 +39,7 @@ static inline void __iomem *ioremap_nocache(unsigned long offset,
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
 	else
-		BUG();
+		return xtensa_ioremap_nocache(offset, size);
 }
 
 static inline void __iomem *ioremap_cache(unsigned long offset,
@@ -46,7 +49,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
 	else
-		BUG();
+		return xtensa_ioremap_cache(offset, size);
 }
 #define ioremap_cache ioremap_cache
 
@@ -60,6 +63,13 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 
 static inline void iounmap(volatile void __iomem *addr)
 {
+	unsigned long va = (unsigned long) addr;
+
+	if (!(va >= XCHAL_KIO_CACHED_VADDR &&
+	      va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
+	    !(va >= XCHAL_KIO_BYPASS_VADDR &&
+	      va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+		xtensa_iounmap(addr);
 }
 
 #define virt_to_bus	virt_to_phys
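A note on the check used in these inline helpers and in the new iounmap(): after verifying addr >= base, the test addr - base < size stays entirely in unsigned arithmetic, so it keeps working even when base + size would wrap past the top of a 32-bit address space, as it can for a KIO window that ends at the 4 GiB boundary. A small self-contained illustration with made-up window values, not the real XCHAL constants of any particular core:

#include <stdint.h>
#include <stdio.h>

/* Made-up 256 MB window at the very top of a 32-bit address space;
 * win_base + win_size wraps to 0 in 32-bit arithmetic. */
static const uint32_t win_base = 0xf0000000u;
static const uint32_t win_size = 0x10000000u;

static int in_window(uint32_t addr)
{
	/* Same shape as the KIO checks above: the subtraction form avoids
	 * comparing against the wrapped-around value of win_base + win_size. */
	return addr >= win_base && addr - win_base < win_size;
}

int main(void)
{
	printf("%d %d %d\n",
	       in_window(0xf0000000u),	/* 1: first byte of the window */
	       in_window(0xfd050000u),	/* 1: inside the window */
	       in_window(0x90000000u));	/* 0: outside, would be remapped */
	return 0;
}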
arch/xtensa/mm/Makefile

@@ -3,5 +3,5 @@
 #
 
 obj-y := init.o misc.o
-obj-$(CONFIG_MMU) += cache.o fault.o mmu.o tlb.o
+obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM) += highmem.o
arch/xtensa/mm/ioremap.c (new file)

@@ -0,0 +1,68 @@
+/*
+ * ioremap implementation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
+				    pgprot_t prot)
+{
+	unsigned long offset = paddr & ~PAGE_MASK;
+	unsigned long pfn = __phys_to_pfn(paddr);
+	struct vm_struct *area;
+	unsigned long vaddr;
+	int err;
+
+	paddr &= PAGE_MASK;
+
+	WARN_ON(pfn_valid(pfn));
+
+	size = PAGE_ALIGN(offset + size);
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area)
+		return NULL;
+
+	vaddr = (unsigned long)area->addr;
+	area->phys_addr = paddr;
+
+	err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
+
+	if (err) {
+		vunmap((void *)vaddr);
+		return NULL;
+	}
+
+	flush_cache_vmap(vaddr, vaddr + size);
+	return (void __iomem *)(offset + vaddr);
+}
+
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
+{
+	return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
+}
+EXPORT_SYMBOL(xtensa_ioremap_nocache);
+
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
+{
+	return xtensa_ioremap(addr, size, PAGE_KERNEL);
+}
+EXPORT_SYMBOL(xtensa_ioremap_cache);
+
+void xtensa_iounmap(volatile void __iomem *io_addr)
+{
+	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+
+	vunmap(addr);
+}
+EXPORT_SYMBOL(xtensa_iounmap);
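One detail worth calling out in the new file: xtensa_ioremap() preserves the sub-page offset of the physical address and adds it back onto the vmalloc address it returns, so xtensa_iounmap() masks that offset off again before handing the address to vunmap(). A tiny standalone illustration of that round trip (the addresses and the 4 KiB page size are made up for the demo):

#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	unsigned long paddr  = 0x90001234UL;		/* unaligned physical address */
	unsigned long offset = paddr & ~DEMO_PAGE_MASK;	/* 0x234, kept by xtensa_ioremap() */
	unsigned long vaddr  = 0xc8000000UL;		/* pretend get_vm_area() returned this */
	unsigned long mapped = vaddr + offset;		/* what the caller gets back */

	/* xtensa_iounmap() recovers the page-aligned address for vunmap(). */
	unsigned long unmap_arg = mapped & DEMO_PAGE_MASK;

	printf("mapped=%#lx unmap arg=%#lx\n", mapped, unmap_arg);
	return unmap_arg == vaddr ? 0 : 1;
}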