[builtins] Align addresses to cache lines in __clear_cache for aarch64
This makes sure that the last cache line gets invalidated properly. This matches the example code at http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/BABJDBHI.html, and also matches what libgcc does.

Differential Revision: https://reviews.llvm.org/D42196

llvm-svn: 323315
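The failure mode is easiest to see with concrete numbers. The sketch below is illustrative only (the line size and address range are made-up values, not from the commit): with 64-byte lines, a range [32, 96), and a loop that starts at the unaligned start address, the line holding bytes 64..95 is never visited, while rounding the start down to a line boundary covers every line overlapping the range.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uintptr_t line = 64;            /* assumed 64-byte cache lines */
  const uintptr_t xstart = 32, xend = 96;

  /* Old loop: starts at the unaligned xstart and steps one line at a
   * time; the next step lands at addr == 96, so the line covering
   * bytes 64..95 is skipped. Prints only "line at 0". */
  for (uintptr_t addr = xstart; addr < xend; addr += line)
    printf("old: line at %#lx\n", (unsigned long)(addr & ~(line - 1)));

  /* Fixed loop: rounding xstart down to a line boundary guarantees
   * every line overlapping [xstart, xend) is visited.
   * Prints "line at 0" and "line at 0x40". */
  for (uintptr_t addr = xstart & ~(line - 1); addr < xend; addr += line)
    printf("new: line at %#lx\n", (unsigned long)addr);
  return 0;
}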
parent 50acecf2ab
commit 09bc73d11f
@@ -163,12 +163,14 @@ void __clear_cache(void *start, void *end) {
    * uintptr_t in case this runs in an IPL32 environment.
    */
   const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
-  for (addr = xstart; addr < xend; addr += dcache_line_size)
+  for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
+       addr += dcache_line_size)
     __asm __volatile("dc cvau, %0" :: "r"(addr));
   __asm __volatile("dsb ish");
 
   const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
-  for (addr = xstart; addr < xend; addr += icache_line_size)
+  for (addr = xstart & ~(icache_line_size - 1); addr < xend;
+       addr += icache_line_size)
     __asm __volatile("ic ivau, %0" :: "r"(addr));
   __asm __volatile("isb sy");
 #elif defined (__powerpc64__)
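For context on the line-size expressions in the hunk: on AArch64, CTR_EL0 bits [19:16] (DminLine) and [3:0] (IminLine) hold log2 of the smallest data- and instruction-cache line size in 4-byte words, so `4 << field` yields the size in bytes. A minimal standalone sketch of that decoding, assuming a target where user-space reads of CTR_EL0 are permitted or emulated (as on mainline Linux); this is not part of the commit:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t ctr_el0;
  __asm__ __volatile__("mrs %0, ctr_el0" : "=r"(ctr_el0));

  /* DminLine, bits [19:16]: log2 of the smallest D-cache line, in words. */
  const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
  /* IminLine, bits [3:0]: log2 of the smallest I-cache line, in words. */
  const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);

  printf("dcache line: %zu bytes, icache line: %zu bytes\n",
         dcache_line_size, icache_line_size);
  return 0;
}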