/*
 *  linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>

#include "proc-macros.S"
/*
 * The secondary kernel init calls v7_flush_dcache_all before it enables
 * the L1; however, the L1 comes out of reset in an undefined state, so
 * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
 * of cache lines with uninitialized data and uninitialized tags to get
 * written out to memory, which does really unpleasant things to the main
 * processor.  We fix this by performing an invalidate, rather than a
 * clean + invalidate, before jumping into the kernel.
 *
 * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
 * to be called for both secondary cores startup and primary core resume
 * procedures.
 *
 * Clobbers: r0-r6 (all written below).
 */
ENTRY(v7_invalidate_l1)
        mov     r0, #0
        mcr     p15, 2, r0, c0, c0, 0   @ CSSELR: select L1 data/unified cache
        mrc     p15, 1, r0, c0, c0, 0   @ CCSIDR: read cache geometry

        movw    r1, #0x7fff
        and     r2, r1, r0, lsr #13     @ r2 = NumSets - 1 (CCSIDR[27:13])

        movw    r1, #0x3ff
        and     r3, r1, r0, lsr #3      @ NumWays - 1 (CCSIDR[12:3])
        add     r2, r2, #1              @ NumSets

        and     r0, r0, #0x7            @ log2(words per line) - 2
        add     r0, r0, #4              @ SetShift

        clz     r1, r3                  @ WayShift
        add     r4, r3, #1              @ NumWays
1:      sub     r2, r2, #1              @ NumSets--
        mov     r3, r4                  @ Temp = NumWays
2:      subs    r3, r3, #1              @ Temp--
        mov     r5, r3, lsl r1
        mov     r6, r2, lsl r0
        orr     r5, r5, r6              @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
        mcr     p15, 0, r5, c7, c6, 2   @ DCISW: invalidate by set/way (no clean)
        bgt     2b                      @ next way of this set
        cmp     r2, #0
        bgt     1b                      @ next set
        dsb     st                      @ drain all invalidates...
        isb                             @ ...before continuing execution
        ret     lr
ENDPROC(v7_invalidate_l1)
/*
 * v7_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * Registers:
 *	r0 - set to 0
 */
ENTRY(v7_flush_icache_all)
        mov     r0, #0
        ALT_SMP(mcr     p15, 0, r0, c7, c1, 0)  @ ICIALLUIS: invalidate I-cache inner shareable
        ALT_UP(mcr      p15, 0, r0, c7, c5, 0)  @ ICIALLU: I+BTB cache invalidate
        ret     lr
ENDPROC(v7_flush_icache_all)
/*
 * v7_flush_dcache_louis()
 *
 * Flush the D-cache up to the Level of Unification Inner Shareable.
 *
 * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 */
ENTRY(v7_flush_dcache_louis)
        dmb                                     @ ensure ordering with previous memory accesses
        mrc     p15, 1, r0, c0, c0, 1           @ read clidr, r0 = clidr
ALT_SMP(mov     r3, r0, lsr #20)                @ move LoUIS into position
ALT_UP( mov     r3, r0, lsr #26)                @ move LoUU into position
        ands    r3, r3, #7 << 1                 @ extract LoU*2 field from clidr
#ifdef CONFIG_ARM_ERRATA_643719
        @ Cortex-A9 r0pX reports LoUIS == 0 even though the D-cache must
        @ still be flushed; detect those cores and force LoUIS to 1.
ALT_SMP(mrceq   p15, 0, r2, c0, c0, 0)          @ read main ID register
ALT_UP( reteq   lr)                             @ LoUU is zero, so nothing to do
        movweq  r1, #:lower16:0x410fc090        @ ID of ARM Cortex A9 r0p?
        movteq  r1, #:upper16:0x410fc090
        biceq   r2, r2, #0x0000000f             @ clear minor revision number
        teqeq   r2, r1                          @ test for errata affected core and if so...
        moveqs  r3, #1 << 1                     @ fix LoUIS value (and set flags state to 'ne')
#endif
        reteq   lr                              @ return if level == 0
        mov     r10, #0                         @ r10 (starting level) = 0
        b       flush_levels                    @ start flushing cache levels
ENDPROC(v7_flush_dcache_louis)
/*
 * v7_flush_dcache_all()
 *
 * Flush the whole D-cache, working outwards from L1 using set/way
 * clean+invalidate operations.  The inner loop walks sets within a way
 * (index-major) so that consecutive writebacks hit sequential physical
 * addresses, improving DRAM throughput.
 *
 * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 */
ENTRY(v7_flush_dcache_all)
        dmb                                     @ ensure ordering with previous memory accesses
        mrc     p15, 1, r0, c0, c0, 1           @ read clidr
        mov     r3, r0, lsr #23                 @ move LoC into position
        ands    r3, r3, #7 << 1                 @ extract LoC*2 from clidr
        beq     finished                        @ if loc is 0, then no need to clean
        mov     r10, #0                         @ start clean at cache level 0
flush_levels:
        add     r2, r10, r10, lsr #1            @ work out 3x current cache level
        mov     r1, r0, lsr r2                  @ extract cache type bits from clidr
        and     r1, r1, #7                      @ mask of the bits for current cache only
        cmp     r1, #2                          @ see what cache we have at this level
        blt     skip                            @ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
        save_and_disable_irqs_notrace r9        @ make cssr&csidr read atomic
#endif
        mcr     p15, 2, r10, c0, c0, 0          @ select current cache level in cssr
        isb                                     @ isb to sync the new cssr&csidr
        mrc     p15, 1, r1, c0, c0, 0           @ read the new csidr
#ifdef CONFIG_PREEMPT
        restore_irqs_notrace r9
#endif
        and     r2, r1, #7                      @ extract the length of the cache lines
        add     r2, r2, #4                      @ add 4 (line length offset)
        movw    r4, #0x3ff
        ands    r4, r4, r1, lsr #3              @ find maximum number on the way size
        clz     r5, r4                          @ find bit position of way size increment
        movw    r7, #0x7fff
        ands    r7, r7, r1, lsr #13             @ extract max number of the index size
loop1:
        mov     r9, r7                          @ create working copy of max index
loop2:
 ARM(   orr     r11, r10, r4, lsl r5    )       @ factor way and cache number into r11
 THUMB( lsl     r6, r4, r5              )
 THUMB( orr     r11, r10, r6            )       @ factor way and cache number into r11
 ARM(   orr     r11, r11, r9, lsl r2    )       @ factor index number into r11
 THUMB( lsl     r6, r9, r2              )
 THUMB( orr     r11, r11, r6            )       @ factor index number into r11
        mcr     p15, 0, r11, c7, c14, 2         @ clean & invalidate by set/way
        subs    r9, r9, #1                      @ decrement the index
        bge     loop2
        subs    r4, r4, #1                      @ decrement the way
        bge     loop1
skip:
        add     r10, r10, #2                    @ increment cache number
        cmp     r3, r10
        bgt     flush_levels
finished:
        mov     r10, #0                         @ switch back to cache level 0
        mcr     p15, 2, r10, c0, c0, 0          @ select current cache level in cssr
        dsb     st
        isb
        ret     lr
ENDPROC(v7_flush_dcache_all)
/*
 * v7_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 *  The data cache flush is achieved using atomic clean / invalidates
 *  working outwards from L1 cache. This is done using Set/Way based cache
 *  maintenance instructions.
 *  The instruction cache can still be invalidated back to the point of
 *  unification in a single instruction.
 */
ENTRY(v7_flush_kern_cache_all)
 ARM(   stmfd   sp!, {r4-r5, r7, r9-r11, lr}    )
 THUMB( stmfd   sp!, {r4-r7, r9-r11, lr}        )
        bl      v7_flush_dcache_all
        mov     r0, #0
        ALT_SMP(mcr     p15, 0, r0, c7, c1, 0)  @ invalidate I-cache inner shareable
        ALT_UP(mcr      p15, 0, r0, c7, c5, 0)  @ I+BTB cache invalidate
 ARM(   ldmfd   sp!, {r4-r5, r7, r9-r11, lr}    )
 THUMB( ldmfd   sp!, {r4-r7, r9-r11, lr}        )
        ret     lr
ENDPROC(v7_flush_kern_cache_all)
/*
 * v7_flush_kern_cache_louis(void)
 *
 * Flush the data cache up to Level of Unification Inner Shareable.
 * Invalidate the I-cache to the point of unification.
 */
ENTRY(v7_flush_kern_cache_louis)
 ARM(   stmfd   sp!, {r4-r5, r7, r9-r11, lr}    )
 THUMB( stmfd   sp!, {r4-r7, r9-r11, lr}        )
        bl      v7_flush_dcache_louis
        mov     r0, #0
        ALT_SMP(mcr     p15, 0, r0, c7, c1, 0)  @ invalidate I-cache inner shareable
        ALT_UP(mcr      p15, 0, r0, c7, c5, 0)  @ I+BTB cache invalidate
 ARM(   ldmfd   sp!, {r4-r5, r7, r9-r11, lr}    )
 THUMB( ldmfd   sp!, {r4-r7, r9-r11, lr}        )
        ret     lr
ENDPROC(v7_flush_kern_cache_louis)
/*
 * v7_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v7_flush_user_cache_all)
        /*FALLTHROUGH*/

/*
 * v7_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache, so these are no-ops (maintenance happens
 *   elsewhere; both entry points simply return).
 */
ENTRY(v7_flush_user_cache_range)
        ret     lr
ENDPROC(v7_flush_user_cache_all)
ENDPROC(v7_flush_user_cache_range)
/*
 * v7_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region.  This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end	  - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_kern_range)
        /* FALLTHROUGH */

/*
 * v7_coherent_user_range(start,end)
 *
 * Same as v7_coherent_kern_range, but the accesses may fault on an
 * unmapped user address (see the 9001 fixup below).
 *
 *	- start   - virtual start address of region
 *	- end	  - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
 UNWIND(.fnstart                )
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3                     @ align start down to a D line
#ifdef CONFIG_ARM_ERRATA_764369
        ALT_SMP(W(dsb))
        ALT_UP(W(nop))
#endif
1:
 USER(  mcr     p15, 0, r12, c7, c11, 1 )       @ clean D line to the point of unification
        add     r12, r12, r2
        cmp     r12, r1
        blo     1b
        dsb     ishst                           @ make cleans visible before I-side ops
        icache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3                     @ realign start for the I line size
2:
 USER(  mcr     p15, 0, r12, c7, c5, 1  )       @ invalidate I line
        add     r12, r12, r2
        cmp     r12, r1
        blo     2b
        mov     r0, #0
        ALT_SMP(mcr     p15, 0, r0, c7, c1, 6)  @ invalidate BTB Inner Shareable
        ALT_UP(mcr      p15, 0, r0, c7, c5, 6)  @ invalidate BTB
        dsb     ishst
        isb
        ret     lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
9001:
#ifdef CONFIG_ARM_ERRATA_775420
        dsb
#endif
        mov     r0, #-EFAULT
        ret     lr
 UNWIND(.fnend          )
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
/*
 * v7_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the region is written back to the
 * memory in question (clean & invalidate by MVA, line by line).
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v7_flush_kern_dcache_area)
        dcache_line_size r2, r3
        add     r1, r0, r1                      @ r1 = end address
        sub     r3, r2, #1
        bic     r0, r0, r3                      @ align start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
        ALT_SMP(W(dsb))
        ALT_UP(W(nop))
#endif
1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line / unified line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7_flush_kern_dcache_area)
/*
 * v7_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 * Partial lines at either end of the region are cleaned & invalidated
 * (to preserve adjacent data sharing the cache line); full lines are
 * invalidated only.  A previously cleaned boundary line must NOT be
 * re-invalidated without cleaning, or dirty data outside the buffer
 * would be discarded — hence the start pointer is advanced past a
 * cleaned first line, and the loop is fully conditional so a cleaned
 * end line is never touched again.
 */
v7_dma_inv_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        tst     r0, r3                          @ start mid-line?
        bic     r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
        ALT_SMP(W(dsb))
        ALT_UP(W(nop))
#endif
        mcrne   p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line
        addne   r0, r0, r2                      @ skip it: already handled above

        tst     r1, r3                          @ end mid-line?
        bic     r1, r1, r3
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D / U line
        cmp     r0, r1
1:
        mcrlo   p15, 0, r0, c7, c6, 1           @ invalidate D / U line
        addlo   r0, r0, r2
        cmplo   r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7_dma_inv_range)
/*
 * v7_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v7_dma_clean_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3                      @ align start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
        ALT_SMP(W(dsb))
        ALT_UP(W(nop))
#endif
1:
        mcr     p15, 0, r0, c7, c10, 1          @ clean D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7_dma_clean_range)
/*
 * v7_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v7_dma_flush_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3                      @ align start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
        ALT_SMP(W(dsb))
        ALT_UP(W(nop))
#endif
1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7_dma_flush_range)
/*
 * dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v7_dma_map_area)
        add     r1, r1, r0                      @ r1 = end address
        teq     r2, #DMA_FROM_DEVICE
        beq     v7_dma_inv_range                @ device writes: invalidate only
        b       v7_dma_clean_range              @ otherwise: clean to memory
ENDPROC(v7_dma_map_area)
/*
 * dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v7_dma_unmap_area)
        add     r1, r1, r0                      @ r1 = end address
        teq     r2, #DMA_TO_DEVICE
        bne     v7_dma_inv_range                @ device wrote: discard stale lines
        ret     lr                              @ TO_DEVICE: nothing to do on unmap
ENDPROC(v7_dma_unmap_area)
2007-05-09 05:27:46 +08:00
|
|
|
__INITDATA
|
|
|
|
|
2011-06-24 00:16:25 +08:00
|
|
|
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
|
|
|
|
define_cache_functions v7
|