locking, lib/atomic64: Annotate atomic64_lock::lock as raw
The spinlock protected atomic64 operations must be irq safe as they
are used in hard interrupt context and cannot be preempted on -rt:

	NIP [c068b218] rt_spin_lock_slowlock+0x78/0x3a8
	LR [c068b1e0] rt_spin_lock_slowlock+0x40/0x3a8
	Call Trace:
	[eb459b90] [c068b1e0] rt_spin_lock_slowlock+0x40/0x3a8 (unreliable)
	[eb459c20] [c068bdb0] rt_spin_lock+0x40/0x98
	[eb459c40] [c03d2a14] atomic64_read+0x48/0x84
	[eb459c60] [c001aaf4] perf_event_interrupt+0xec/0x28c
	[eb459d10] [c0010138] performance_monitor_exception+0x7c/0x150
	[eb459d30] [c0014170] ret_from_except_full+0x0/0x4c

So annotate it.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse
checking will work as usual.

Signed-off-by: Shan Hai <haishan.bai@gmail.com>
Reviewed-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3b8f404815
commit f59ca05871
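Background for readers outside the kernel tree: below is a minimal sketch, not part of this commit, of the pattern the patch enforces. The demo_* names are hypothetical. On -rt a spinlock_t is backed by a sleeping rtmutex, so taking one from hard interrupt context (as atomic64_read() does in the trace above) is invalid; a raw_spinlock_t always busy-waits with interrupts disabled, which is safe there.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);	/* hypothetical lock */
static long long demo_counter;		/* hypothetical 64-bit counter */

static long long demo_read(void)
{
	unsigned long flags;
	long long val;

	/* Never sleeps, so this is usable from hard irq context on -rt. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	val = demo_counter;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
	return val;
}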
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -29,7 +29,7 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
 
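Every hunk below obtains its lock through lock_addr(), which this excerpt does not show. For orientation, the helper hashes the address of the atomic64_t down to one of the NR_LOCKS cacheline-sized slots declared above, roughly as in the following sketch (the exact shift constants are illustrative and should be checked against the tree; after this commit its return type is raw_spinlock_t as well):

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	/* Fold the address so nearby atomics spread across the locks. */
	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}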
@@ -48,9 +48,9 @@ long long atomic64_read(const atomic64_t *v)
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_read);
@@ -60,9 +60,9 @@ void atomic64_set(atomic64_t *v, long long i)
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_set);
 
@@ -71,9 +71,9 @@ void atomic64_add(long long a, atomic64_t *v)
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_add);
 
@@ -83,9 +83,9 @@ long long atomic64_add_return(long long a, atomic64_t *v)
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_add_return);
@@ -95,9 +95,9 @@ void atomic64_sub(long long a, atomic64_t *v)
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_sub);
 
@@ -107,9 +107,9 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_sub_return);
@@ -120,11 +120,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -135,11 +135,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -150,10 +150,10 @@ long long atomic64_xchg(atomic64_t *v, long long new)
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	v->counter = new;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_xchg);
@@ -164,12 +164,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	int ret = 0;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
 		ret = 1;
 	}
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
 	int i;
 
 	for (i = 0; i < NR_LOCKS; ++i)
-		spin_lock_init(&atomic64_lock[i].lock);
+		raw_spin_lock_init(&atomic64_lock[i].lock);
 	return 0;
 }
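Nothing changes for callers of the atomic64 API: the raw lock remains an implementation detail of this generic spinlock-based fallback, and only atomic64_add() and atomic64_read() from the hunks above are used here. A usage sketch with hypothetical demo_* names:

#include <linux/atomic.h>

static atomic64_t demo_events = ATOMIC64_INIT(0);	/* hypothetical counter */

/* After this change, safe even from hard irq context on -rt. */
static void demo_hit(void)
{
	atomic64_add(1, &demo_events);
}

static long long demo_snapshot(void)
{
	return atomic64_read(&demo_events);
}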