Do not flush the cache in flush_cache_v(un)map for VIPT caches
In the case of non-aliasing VIPT caches, there is no need to flush the whole cache when a new mapping is created. This patch introduces that condition check. In the non-aliasing VIPT case, flush_cache_vmap() still needs a DSB, since the set_pte_at() called from vmap_pte_range() does not include such a barrier (it is usually provided by the TLB flushing functions).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 376e14218d
parent 24b647a042
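Before the diff below, here is a minimal, compilable userspace sketch of the ordering the commit message describes: vmap_pte_range() writes the PTEs through set_pte_at() with no barrier, so the subsequent flush_cache_vmap() must either flush the whole cache (VIVT or aliasing VIPT) or at least issue a DSB (non-aliasing VIPT). All *_stub names, values, and bodies are invented stand-ins for illustration; only the names mirror the kernel's, this is not the kernel code.

/*
 * Illustration only -- not part of the patch.
 */
#include <stdbool.h>
#include <stddef.h>

static bool cache_is_vipt_nonaliasing_stub = true;	/* assumed cache type */

static void flush_cache_all_stub(void)	{ /* full cache flush would go here */ }
static void dsb_stub(void)		{ /* data synchronisation barrier */ }

/* Writes a PTE; in the kernel this cleans the cache line but issues no DSB. */
static void set_pte_at_stub(unsigned long *ptep, unsigned long pteval)
{
	*ptep = pteval;
}

/* Simplified stand-in for vmap_pte_range(): populate a run of PTEs. */
static void vmap_pte_range_stub(unsigned long *ptes, size_t n, unsigned long phys)
{
	for (size_t i = 0; i < n; i++)
		set_pte_at_stub(&ptes[i], phys + (i << 12));
}

/* The behaviour introduced by the patch (compare with the last hunk below). */
static void flush_cache_vmap_stub(void)
{
	if (!cache_is_vipt_nonaliasing_stub)
		flush_cache_all_stub();	/* aliasing caches: flush everything */
	else
		dsb_stub();		/* non-aliasing VIPT: a barrier is enough */
}

int main(void)
{
	unsigned long ptes[4] = { 0 };

	vmap_pte_range_stub(ptes, 4, 0x80000000UL);	/* create the mapping */
	flush_cache_vmap_stub();			/* order it before first use */
	return ptes[0] != 0x80000000UL;
}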
@@ -15,6 +15,7 @@
 
 #include <asm/glue.h>
 #include <asm/shmparam.h>
+#include <asm/cachetype.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -295,16 +296,6 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 
 #endif
 
-/*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages.  Since the
- * direct-mappings of these pages may contain cached data, we need
- * to do a full cache flush to ensure that writebacks don't corrupt
- * data placed into these pages via the new mappings.
- */
-#define flush_cache_vmap(start, end)		flush_cache_all()
-#define flush_cache_vunmap(start, end)		flush_cache_all()
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
@@ -444,4 +435,29 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
 	dmac_inv_range(start, start + size);
 }
 
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	if (!cache_is_vipt_nonaliasing())
+		flush_cache_all();
+	else
+		/*
+		 * set_pte_at() called from vmap_pte_range() does not
+		 * have a DSB after cleaning the cache line.
+		 */
+		dsb();
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+	if (!cache_is_vipt_nonaliasing())
+		flush_cache_all();
+}
+
 #endif
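As an aside on why aliasing VIPT caches still need the full flush: two virtual mappings of the same physical page can sit in different cache sets whenever their "colours" differ. The standalone snippet below illustrates this with a macro mirrored from CACHE_COLOUR() in the first hunk; the SHMLBA/PAGE_SHIFT values and the addresses are assumptions made for the example, not taken from the patch.

#include <stdio.h>

/* Assumed values for illustration; on ARM SHMLBA is a multiple of PAGE_SIZE. */
#define PAGE_SHIFT	12
#define SHMLBA		(4UL << PAGE_SHIFT)
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	/* Hypothetical direct-map (lowmem) and vmalloc addresses of one page. */
	unsigned long direct_va  = 0xc0001000UL;
	unsigned long vmalloc_va = 0xe0803000UL;

	/*
	 * Different colours mean the two mappings can be cached in different
	 * sets of an aliasing VIPT cache, so stale lines cached via the
	 * direct mapping must be flushed before the new mapping is used --
	 * hence flush_cache_all() on anything but non-aliasing VIPT caches.
	 */
	printf("colour(direct)=%lu colour(vmalloc)=%lu\n",
	       CACHE_COLOUR(direct_va), CACHE_COLOUR(vmalloc_va));
	return 0;
}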