ARC: [mm] Aliasing VIPT dcache support 2/4

This is the meat of the series: it prevents dcache alias creation by
always keeping the U and K mappings of a page congruent.
If one mapping already exists and the other tries to access the page,
the previous one is first flushed to the physical page (wback+inv).
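
To make "congruent" concrete, here is a userspace sketch of the color
check this series introduces (see CACHE_COLOR() in the cacheflush.h hunk
below); the 8K page size and the two addresses are assumptions made up
for the demo, not values from the patch:

#include <stdio.h>

/* mirrors the patch's CACHE_COLOR(): which of the 4 page-sized slices
 * of a dcache way an address indexes into; 8K pages assumed */
#define PAGE_SHIFT	13
#define CACHE_COLOR(addr)	((((unsigned long)(addr)) >> PAGE_SHIFT) & 3)

int main(void)
{
	unsigned long k_vaddr = 0x7000a000;	/* hypothetical K-mapping */
	unsigned long u_vaddr = 0x20004000;	/* hypothetical U-mapping */

	/* same physical page behind both, but the colors differ, so the
	 * two mappings index into different cache sets: an alias */
	if (CACHE_COLOR(k_vaddr) != CACHE_COLOR(u_vaddr))
		printf("not congruent: flush before the other access\n");

	return 0;
}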

Essentially flush_dcache_page()/copy_user_highpage() create the K-mapping
of a page but try to defer flushing it, unless a U-mapping already exists.
When the page is actually mapped to userspace, update_mmu_cache() flushes
the K-mapping (in certain cases this can be optimised out).
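
Condensed from the hunks below, the deferred-flush handshake is roughly
the following (a sketch that elides the pagecache/anon distinctions and
the congruence checks the real code makes):

/* kernel wrote the page via its K-mapping: only note that it is dirty */
void flush_dcache_page(struct page *page)
{
	set_bit(PG_arch_1, &page->flags);	/* flush deferred */
}

/* ...and settle up when a U-mapping is actually wired into the MMU */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
		      pte_t *ptep)
{
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	if (test_and_clear_bit(PG_arch_1, &page->flags))
		__flush_dcache_page(paddr, paddr);	/* wback + inv */
}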

Additionally flush_cache_mm(), flush_cache_range() and flush_cache_page()
handle the purging of stale userspace mappings on exit/munmap...
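
For reference, the path by which these fire on teardown (assumed and
simplified; the mmu_gather walk is generic kernel code, not part of this
patch):

/*
 * munmap() / exit_mmap()
 *   -> unmap_page_range()
 *        tlb_start_vma()    // patched below: calls flush_cache_range()
 *                           // for VIPT aliasing D$ (which in turn is
 *                           // flush_cache_all(), for now)
 *        ...PTEs torn down...
 *        tlb_end_vma()      // TLB range flush
 */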

flush_anon_page() handles the existing U-mapping of an anon page before
the kernel reads it via the GUP path.
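
The caller side of this lives in generic GUP code, roughly as below
(quoted from memory of mm/memory.c of this era, so treat the exact
lines as an assumption):

	/* __get_user_pages(): settle aliases before anyone reads the
	 * page through a kernel address */
	pages[i] = page;
	flush_anon_page(vma, page, start);	/* dirty U-mapping lines */
	flush_dcache_page(page);		/* K-mapping bookkeeping */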

Note that while not complete, this is enough to boot a simple
dynamically linked Busybox-based rootfs.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>

Author: Vineet Gupta
Date:   2013-05-09 21:54:51 +05:30
Parent: 6ec18a81b2
Commit: 4102b53392

 6 files changed, 223 insertions(+), 22 deletions(-)

arch/arc/Kconfig

@@ -182,6 +182,10 @@ config ARC_CACHE_PAGES
 	  Note that Global I/D ENABLE + Per Page DISABLE works but corollary
 	  Global DISABLE + Per Page ENABLE won't work
 
+config ARC_CACHE_VIPT_ALIASING
+	bool "Support VIPT Aliasing D$"
+	default n
+
 endif	#ARC_CACHE
 
 config ARC_HAS_ICCM

arch/arc/include/asm/cacheflush.h

@@ -50,18 +50,55 @@ void dma_cache_wback(unsigned long start, unsigned long sz);
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
-/*
- * VM callbacks when entire/range of user-space V-P mappings are
- * torn-down/get-invalidated
- *
- * Currently we don't support D$ aliasing configs for our VIPT caches
- */
-#define flush_cache_dup_mm(mm)			/* called on fork */
+#define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+/* NOPS for VIPT Cache with non-aliasing D$ configurations only */
 #define flush_cache_mm(mm)			/* called on munmap/exit */
 #define flush_cache_range(mm, u_vstart, u_vend)
 #define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */
 
+#else	/* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+	unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+	unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long u_vaddr);
+
+#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2)			\
+	(cache_is_vipt_aliasing() ?				\
+		(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0)
+
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
 do {								\
 	memcpy(dst, src, len);					\

arch/arc/include/asm/page.h

@@ -16,13 +16,27 @@
 #define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)	free_page(addr)
 
-/* TBD: for now don't worry about VIPT D$ aliasing */
 #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
 #define copy_page(to, from)		memcpy((to), (from), PAGE_SIZE)
 
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define clear_user_page(addr, vaddr, pg)	clear_page(addr)
 #define copy_user_page(vto, vfrom, vaddr, pg)	copy_page(vto, vfrom)
 
+#else	/* VIPT aliasing dcache */
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS

arch/arc/include/asm/tlb.h

@@ -30,13 +30,20 @@ do {						\
 /*
  * This pair is called at time of munmap/exit to flush cache and TLB entries
  * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
- *    as we don't support aliasing configs in our VIPT D$.
+ * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
  * 2) tlb-flush part - implemented via tlb_end_vma( ) flushes the TLB range
  *
  * Note, read http://lkml.org/lkml/2004/1/15/6
  */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
 #define tlb_start_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma)						\
+do {									\
+	if (!tlb->fullmm)						\
+		flush_cache_range(vma, vma->vm_start, vma->vm_end);	\
+} while (0)
+#endif
 
 #define tlb_end_vma(tlb, vma)						\
 do {								\

arch/arc/mm/cache_arc700.c

@@ -68,6 +68,7 @@
 #include <linux/mmu_context.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/pagemap.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 #include <asm/setup.h>
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void)
 	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
 	int way_pg_ratio = way_pg_ratio;
+	int dcache_does_alias;
 	char str[256];
 
 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -184,9 +186,13 @@ chk_dc:
 			panic("Cache H/W doesn't match kernel Config");
 		}
 
+		dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
+
 		/* check for D-Cache aliasing */
-		if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
-			panic("D$ aliasing not handled right now\n");
+		if (dcache_does_alias && !cache_is_vipt_aliasing())
+			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+		else if (!dcache_does_alias && cache_is_vipt_aliasing())
+			panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 #endif
 
 	/* Set the default Invalidate Mode to "simply discard dirty lines"
@@ -312,7 +318,7 @@ static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
 	}
 }
 
-/* For kernel mappings cache op index is same as paddr */
+/* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
 
 /*
@@ -464,10 +470,47 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
  * Exported APIs
  */
 
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ *  -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
+ */
 void flush_dcache_page(struct page *page)
 {
-	/* Make a note that dcache is not yet flushed for this page */
-	set_bit(PG_arch_1, &page->flags);
+	struct address_space *mapping;
+
+	if (!cache_is_vipt_aliasing()) {
+		set_bit(PG_arch_1, &page->flags);
+		return;
+	}
+
+	/* don't handle anon pages here */
+	mapping = page_mapping(page);
+	if (!mapping)
+		return;
+
+	/*
+	 * pagecache page, file not yet mapped to userspace
+	 * Make a note that K-mapping is dirty
+	 */
+	if (!mapping_mapped(mapping)) {
+		set_bit(PG_arch_1, &page->flags);
+	} else if (page_mapped(page)) {
+
+		/* kernel reading from page with U-mapping */
+		void *paddr = page_address(page);
+		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+		if (addr_not_cache_congruent(paddr, vaddr))
+			__flush_dcache_page(paddr, vaddr);
+	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
@@ -612,6 +655,87 @@ noinline void flush_cache_all(void)
 
 }
 
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+	flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+		      unsigned long pfn)
+{
+	unsigned int paddr = pfn << PAGE_SHIFT;
+
+	__sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end)
+{
+	flush_cache_all();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+	void *kfrom = page_address(from);
+	void *kto = page_address(to);
+	int clean_src_k_mappings = 0;
+
+	/*
+	 * If SRC page was already mapped in userspace AND its U-mapping is
+	 * not congruent with the K-mapping, sync the former to the physical
+	 * page so that the K-mapping in the memcpy below sees the right data
+	 *
+	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+	 * equally valid for SRC page as well
+	 */
+	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+		__flush_dcache_page(kfrom, u_vaddr);
+		clean_src_k_mappings = 1;
+	}
+
+	copy_page(kto, kfrom);
+
+	/*
+	 * Mark DST page K-mapping as dirty for a later finalization by
+	 * update_mmu_cache().  Although the finalization could have been done
+	 * here as well (both vaddr/paddr are available), update_mmu_cache()
+	 * already has code to do that for other non-copied user pages
+	 * (e.g. read faults which wire in a pagecache page directly).
+	 */
+	set_bit(PG_arch_1, &to->flags);
+
+	/*
+	 * if SRC was already usermapped and non-congruent to kernel mapping
+	 * sync the kernel mapping back to physical page
+	 */
+	if (clean_src_k_mappings) {
+		__flush_dcache_page(kfrom, kfrom);
+	} else {
+		set_bit(PG_arch_1, &from->flags);
+	}
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+	clear_page(to);
+	set_bit(PG_arch_1, &page->flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+		     unsigned long u_vaddr)
+{
+	/* TBD: do we really need to clear the kernel mapping */
+	__flush_dcache_page(page_address(page), u_vaddr);
+	__flush_dcache_page(page_address(page), page_address(page));
+}
+
+#endif
+
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
  * Needed for JITs which generate code on the fly

arch/arc/mm/tlb.c

@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 /*
  * Called at the end of pagefault, for a userspace mapped page
  *  -pre-install the corresponding TLB entry into MMU
- *  -Finalize the delayed D-cache flush (wback+inv kernel mapping)
+ *  -Finalize the delayed D-cache flush of kernel mapping of page due to
+ *	flush_dcache_page(), copy_user_highpage()
+ *
+ * Note that flush (when done) involves both WBACK - so physical page is
+ * in sync as well as INV - so any non-congruent aliases don't remain
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
 	create_tlb(vma, vaddr, ptep);
 
-	/* icache doesn't snoop dcache, thus needs to be made coherent here */
-	if (vma->vm_flags & VM_EXEC) {
+	/*
+	 * Exec page : Independent of aliasing/page-color considerations,
+	 *	       since icache doesn't snoop dcache on ARC, any dirty
+	 *	       K-mapping of a code page needs to be wback+inv so that
+	 *	       icache fetch by userspace sees code correctly.
+	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+	 *	       so userspace sees the right data.
+	 *	       (Avoids the flush for Non-exec + congruent mapping case)
+	 */
+	if ((vma->vm_flags & VM_EXEC) || addr_not_cache_congruent(paddr, vaddr)) {
 		struct page *page = pfn_to_page(pte_pfn(*ptep));
+
+		/* if page was dcache dirty, flush now */
 		int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
+
 		if (dirty) {
-			unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+			/* wback + inv dcache lines */
 			__flush_dcache_page(paddr, paddr);
-			__inv_icache_page(paddr, vaddr);
+
+			/* invalidate any existing icache lines */
+			if (vma->vm_flags & VM_EXEC)
+				__inv_icache_page(paddr, vaddr);
 		}
 	}
 }