mmiowb: Hook up mmiowb helpers to spinlocks and generic I/O accessors
Removing explicit calls to mmiowb() from driver code means that we must now call into the generic mmiowb_spin_{lock,unlock}() functions from the core spinlock code. In order to elide barriers following critical sections without any I/O writes, we also hook into the asm-generic I/O routines. Acked-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Will Deacon <will.deacon@arm.com>
This commit is contained in:
parent: fdcd06a8ab
commit: 60ca1e5a20
@@ -19,6 +19,7 @@
 #include <asm-generic/iomap.h>
 #endif
 
+#include <asm/mmiowb.h>
 #include <asm-generic/pci_iomap.h>
 
 #ifndef mmiowb
@@ -49,7 +50,7 @@
 
 /* serialize device access against a spin_unlock, usually handled there. */
 #ifndef __io_aw
-#define __io_aw()	barrier()
+#define __io_aw()	mmiowb_set_pending()
 #endif
 
 #ifndef __io_pbw
|
@@ -57,6 +57,7 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <asm/barrier.h>
+#include <asm/mmiowb.h>
 
 
 /*
@@ -178,6 +179,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
 	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 }
 
 #ifndef arch_spin_lock_flags
@@ -189,15 +191,22 @@ do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lo
 {
 	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
+	mmiowb_spin_lock();
 }
 
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
-	return arch_spin_trylock(&(lock)->raw_lock);
+	int ret = arch_spin_trylock(&(lock)->raw_lock);
+
+	if (ret)
+		mmiowb_spin_lock();
+
+	return ret;
 }
 
 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
+	mmiowb_spin_unlock();
 	arch_spin_unlock(&lock->raw_lock);
 	__release(lock);
 }
|
@@ -111,6 +111,7 @@ void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 	debug_spin_lock_after(lock);
 }
 
@@ -118,8 +119,10 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&lock->raw_lock);
 
-	if (ret)
+	if (ret) {
+		mmiowb_spin_lock();
 		debug_spin_lock_after(lock);
+	}
 #ifndef CONFIG_SMP
 	/*
 	 * Must not happen on UP:
@@ -131,6 +134,7 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 
 void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
+	mmiowb_spin_unlock();
 	debug_spin_unlock(lock);
 	arch_spin_unlock(&lock->raw_lock);
 }
Loading…
Reference in New Issue