x86/mm: Do not flush last cacheline twice in clflush_cache_range()
The current algorithm used in clflush_cache_range() can cause the last
cache line of the buffer to be flushed twice. Fix that algorithm so
that each cache line will only be flushed once.

Reported-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hp.com>
Link: http://lkml.kernel.org/r/1430259192-18802-1-git-send-email-ross.zwisler@linux.intel.com
Link: http://lkml.kernel.org/r/1431332153-18566-5-git-send-email-bp@alien8.de
[ Changed it to 'void *' to simplify the type conversions. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 030bbdbf4c
commit 6c434d6176
@@ -129,16 +129,15 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  */
 void clflush_cache_range(void *vaddr, unsigned int size)
 {
-	void *vend = vaddr + size - 1;
+	unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
+	void *vend = vaddr + size;
+	void *p;
 
 	mb();
 
-	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
-		clflushopt(vaddr);
-	/*
-	 * Flush any possible final partial cacheline:
-	 */
-	clflushopt(vend);
+	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+	     p < vend; p += boot_cpu_data.x86_clflush_size)
+		clflushopt(p);
 
 	mb();
 }
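For illustration, here is a small user-space sketch (not kernel code) that models the loop bounds of the old and new versions of clflush_cache_range() and simply counts how many clflushopt() calls each would issue. The 64-byte cache line size and the buffer address/size used in main() are assumptions chosen to show the double flush the commit message describes.

/*
 * User-space sketch, not kernel code: it models the loop bounds of the
 * old and new clflush_cache_range() and counts how many clflushopt()
 * calls each would issue.  The 64-byte cache line size and the buffer
 * parameters in main() are assumptions chosen for illustration.
 */
#include <stdio.h>

#define CLSIZE 64UL			/* assumed x86_clflush_size */

static void old_algo(unsigned long vaddr, unsigned long size)
{
	unsigned long vend = vaddr + size - 1;
	unsigned long flushes = 0, last_line = 0, p;

	for (p = vaddr; p < vend; p += CLSIZE) {
		flushes++;		/* clflushopt(p) */
		last_line = p & ~(CLSIZE - 1);
	}
	flushes++;			/* clflushopt(vend): "final partial cacheline" */
	printf("old: %lu flushes%s\n", flushes,
	       (vend & ~(CLSIZE - 1)) == last_line ?
	       ", last cache line flushed twice" : "");
}

static void new_algo(unsigned long vaddr, unsigned long size)
{
	unsigned long clflush_mask = CLSIZE - 1;
	unsigned long vend = vaddr + size;
	unsigned long flushes = 0, p;

	/* Align down to a cache line boundary, then visit each line once. */
	for (p = vaddr & ~clflush_mask; p < vend; p += CLSIZE)
		flushes++;		/* clflushopt(p) */
	printf("new: %lu flushes\n", flushes);
}

int main(void)
{
	/* A cacheline-aligned 256-byte buffer spans exactly four lines. */
	old_algo(0x1000, 256);	/* prints: old: 5 flushes, last cache line flushed twice */
	new_algo(0x1000, 256);	/* prints: new: 4 flushes */
	return 0;
}

With a cacheline-aligned 256-byte buffer, the old loop issues four flushes and the trailing clflushopt(vend) then hits the last cache line a second time, while the new loop aligns vaddr down to a cache line boundary and visits each of the four lines exactly once.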