diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4c32ede464b5..2b6146241bde 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -632,9 +632,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (size >= scache_size)
 		r4k_blast_scache();
 	else {
-		unsigned long lsize = cpu_scache_line_size();
-		unsigned long almask = ~(lsize - 1);
-
 		/*
 		 * There is no clearly documented alignment requirement
 		 * for the cache instruction on MIPS processors and
@@ -643,9 +640,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 		 * hit ops with insufficient alignment. Solved by
 		 * aligning the address to cache line size.
 		 */
-		cache_op(Hit_Writeback_Inv_SD, addr & almask);
-		cache_op(Hit_Writeback_Inv_SD,
-			 (addr + size - 1) & almask);
 		blast_inv_scache_range(addr, addr + size);
 	}
 	__sync();
@@ -655,12 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
-		unsigned long lsize = cpu_dcache_line_size();
-		unsigned long almask = ~(lsize - 1);
-
 		R4600_HIT_CACHEOP_WAR_IMPL;
-		cache_op(Hit_Writeback_Inv_D, addr & almask);
-		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
 		blast_inv_dcache_range(addr, addr + size);
 	}
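
Note on the removed lines: for a power-of-two line size, ~(lsize - 1) is the usual alignment mask, so addr & almask rounds an address down to the start of its cache line and (addr + size - 1) & almask yields the line containing the last byte of the buffer; the dropped cache_op() pair therefore hit exactly those two boundary lines. The blast_inv_*cache_range() helpers appear to perform the same line-size rounding on the range they walk, which is presumably why the explicit pair is redundant here. Below is a minimal user-space sketch of that mask arithmetic only; line_start(), the example addresses and the 32-byte line size are illustrative assumptions, not values taken from the kernel.

#include <stdio.h>

/*
 * Round an address down to the start of its cache line, using the same
 * arithmetic as the removed "almask" computation: for a power-of-two
 * line size, ~(lsize - 1) clears the offset bits within the line.
 */
static unsigned long line_start(unsigned long addr, unsigned long lsize)
{
	unsigned long almask = ~(lsize - 1);

	return addr & almask;
}

int main(void)
{
	unsigned long lsize = 32;	/* illustrative cache line size */
	unsigned long addr = 0x1005;	/* unaligned buffer start */
	unsigned long size = 100;

	/*
	 * The two lines the dropped cache_op() calls operated on: the one
	 * holding the first byte and the one holding the last byte.
	 */
	printf("first line: %#lx\n", line_start(addr, lsize));
	printf("last line:  %#lx\n", line_start(addr + size - 1, lsize));
	return 0;
}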