/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <arch/spr_def.h>

#include "spinlock_common.h"

void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int iterations = 0;
	int delta;
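
	/*
	 * __insn_tns() atomically stores "1" and returns the prior value,
	 * so a set low bit in next_ticket means another cpu is between its
	 * tns and the store that puts the real value back; spin until we
	 * see a clean value.
	 */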
	while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
		delay_backoff(iterations++);

	/* Increment the next ticket number, implicitly releasing tns lock. */
	lock->next_ticket = my_ticket + TICKET_QUANTUM;

	/* Wait until it's our turn. */
	while ((delta = my_ticket - lock->current_ticket) != 0)
		relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
}
EXPORT_SYMBOL(arch_spin_lock);

int arch_spin_trylock(arch_spinlock_t *lock)
{
	/*
	 * Grab a ticket; no need to retry if it's busy, we'll just
	 * treat that the same as "locked", since someone else
	 * will lock it momentarily anyway.
	 */
	int my_ticket = __insn_tns((void *)&lock->next_ticket);

	if (my_ticket == lock->current_ticket) {
		/* Not currently locked, so lock it by keeping this ticket. */
		lock->next_ticket = my_ticket + TICKET_QUANTUM;
		/* Success! */
		return 1;
	}

	if (!(my_ticket & 1)) {
		/* Release next_ticket. */
		lock->next_ticket = my_ticket;
	}

	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock);

void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u32 iterations = 0;
	int curr = READ_ONCE(lock->current_ticket);
	int next = READ_ONCE(lock->next_ticket);

	/* Return immediately if unlocked. */
	if (next == curr)
		return;

	/* Wait until the current locker has released the lock. */
	do {
		delay_backoff(iterations++);
	} while (READ_ONCE(lock->current_ticket) == curr);

	/*
	 * The TILE architecture doesn't do read speculation; therefore
	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
	 */
	barrier();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

/*
 * The low byte is always reserved to be the marker for a "tns" operation
 * since the low bit is set to "1" by a tns. The next seven bits are
 * zeroes. The next byte holds the "next" writer value, i.e. the ticket
 * available for the next task that wants to write. The third byte holds
 * the current writer value, i.e. the writer who holds the current ticket.
 * If current == next == 0, there are no interested writers.
 */
#define WR_NEXT_SHIFT	_WR_NEXT_SHIFT
#define WR_CURR_SHIFT	_WR_CURR_SHIFT
#define WR_WIDTH	_WR_WIDTH
#define WR_MASK		((1 << WR_WIDTH) - 1)

/*
 * The last eight bits hold the active reader count. This has to be
 * zero before a writer can start to write.
 */
#define RD_COUNT_SHIFT	_RD_COUNT_SHIFT
#define RD_COUNT_WIDTH	_RD_COUNT_WIDTH
#define RD_COUNT_MASK	((1 << RD_COUNT_WIDTH) - 1)
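
/*
 * Putting the two comments above together (and assuming the _WR_* / _RD_*
 * shifts from the spinlock headers place each field in its own byte, as
 * the byte-by-byte description says), the 32-bit lock word looks like:
 *
 *   31       24 23       16 15        8 7         0
 *  +-----------+-----------+-----------+-----------+
 *  | rd count  |  wr curr  |  wr next  | tns marker|
 *  +-----------+-----------+-----------+-----------+
 */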

/*
 * We can get the read lock if everything but the reader bits (which
 * are in the high part of the word) is zero, i.e. no active or
 * waiting writers, no tns.
 *
 * We guard the tns/store-back with an interrupt critical section to
 * preserve the semantic that the same read lock can be acquired in an
 * interrupt context.
 */
int arch_read_trylock(arch_rwlock_t *rwlock)
{
	u32 val;
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
	val = __insn_tns((int *)&rwlock->lock);
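	/*
	 * Shifting out the high reader-count byte leaves zero only if
	 * there is no tns in flight and no waiting or active writer.
	 */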
	if (likely((val << _RD_COUNT_WIDTH) == 0)) {
		val += 1 << RD_COUNT_SHIFT;
		rwlock->lock = val;
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
		BUG_ON(val == 0); /* we don't expect wraparound */
		return 1;
	}
	if ((val & 1) == 0)
		rwlock->lock = val;
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
	return 0;
}
EXPORT_SYMBOL(arch_read_trylock);

/*
 * Spin doing arch_read_trylock() until we acquire the lock.
 * ISSUE: This approach can permanently starve readers. A reader who sees
 * a writer could instead take a ticket lock (just like a writer would),
 * and atomically enter read mode (with 1 reader) when it gets the ticket.
 * This way both readers and writers would always make forward progress
 * in a finite time.
 */
void arch_read_lock(arch_rwlock_t *rwlock)
{
	u32 iterations = 0;
	while (unlikely(!arch_read_trylock(rwlock)))
		delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_read_lock);

void arch_read_unlock(arch_rwlock_t *rwlock)
{
	u32 val, iterations = 0;

	mb(); /* guarantee anything modified under the lock is visible */
	for (;;) {
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
		val = __insn_tns((int *)&rwlock->lock);
		if (likely((val & 1) == 0)) {
			rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
			__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
			break;
		}
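		/* Someone else is mid tns/store-back; back off and retry. */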
		__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
		delay_backoff(iterations++);
	}
}
EXPORT_SYMBOL(arch_read_unlock);

/*
 * We don't need an interrupt critical section here (unlike for
 * arch_read_lock) since we should never use a bare write lock where
 * it could be interrupted by code that could try to re-acquire it.
 */
void arch_write_lock(arch_rwlock_t *rwlock)
{
	/*
	 * The trailing underscore on this variable (and curr_ below)
	 * reminds us that the high bits are garbage; we mask them out
	 * when we compare them.
	 */
	u32 my_ticket_;
	u32 iterations = 0;
	u32 val = __insn_tns((int *)&rwlock->lock);

	if (likely(val == 0)) {
		rwlock->lock = 1 << _WR_NEXT_SHIFT;
		return;
	}

	/*
	 * Wait until there are no readers, then bump up the next
	 * field and capture the ticket value.
	 */
	for (;;) {
		if (!(val & 1)) {
			if ((val >> RD_COUNT_SHIFT) == 0)
				break;
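			/* Still readers: put the value back to release the tns. */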
			rwlock->lock = val;
		}
		delay_backoff(iterations++);
		val = __insn_tns((int *)&rwlock->lock);
	}

	/* Take out the next ticket and extract my ticket value. */
	rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
	my_ticket_ = val >> WR_NEXT_SHIFT;

	/* Wait until the "current" field matches our ticket. */
	for (;;) {
		u32 curr_ = val >> WR_CURR_SHIFT;
		u32 delta = ((my_ticket_ - curr_) & WR_MASK);
		if (likely(delta == 0))
			break;

		/* Delay based on how many lock-holders are still out there. */
		relax((256 / CYCLES_PER_RELAX_LOOP) * delta);

		/*
		 * Get a non-tns value to check; we don't need to tns
		 * it ourselves. Since we're not tns'ing, we retry
		 * more rapidly to get a valid value.
		 */
		while ((val = rwlock->lock) & 1)
			relax(4);
	}
}
EXPORT_SYMBOL(arch_write_lock);

int arch_write_trylock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);

	/*
	 * If a tns is in progress, or there's a waiting or active locker,
	 * or active readers, we can't take the lock, so give up.
	 */
	if (unlikely(val != 0)) {
		if (!(val & 1))
			rwlock->lock = val;
		return 0;
	}

	/* Set the "next" field to mark it locked. */
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
	return 1;
}
EXPORT_SYMBOL(arch_write_trylock);

void arch_write_unlock(arch_rwlock_t *rwlock)
{
	u32 val, eq, mask;

	mb(); /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
	if (likely(val == (1 << _WR_NEXT_SHIFT))) {
		rwlock->lock = 0;
		return;
	}
	while (unlikely(val & 1)) {
		/* Limited backoff since we are the highest-priority task. */
		relax(4);
		val = __insn_tns((int *)&rwlock->lock);
	}
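	/*
	 * Bump the current-writer byte; if it now equals the next-writer
	 * byte there are no more waiting writers (and the reader count is
	 * already zero), so the whole word collapses back to 0.
	 */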
	mask = 1 << WR_CURR_SHIFT;
	val = __insn_addb(val, mask);
	eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
	val = __insn_mz(eq & mask, val);
	rwlock->lock = val;
}
EXPORT_SYMBOL(arch_write_unlock);