Merge branch 'core/futexes' into core/core

Ingo Molnar 2008-12-25 13:54:14 +01:00
commit cc37d3d206
2 changed files with 37 additions and 27 deletions

include/linux/futex.h

@@ -25,7 +25,8 @@ union ktime;
 #define FUTEX_WAKE_BITSET	10
 
 #define FUTEX_PRIVATE_FLAG	128
-#define FUTEX_CMD_MASK		~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME	256
+#define FUTEX_CMD_MASK		~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
 
 #define FUTEX_WAIT_PRIVATE	(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_PRIVATE	(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
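
The widened mask is what keeps command decoding correct once callers OR the new option bit into the op word. A minimal userland sketch of the arithmetic (the constants are copied from the hunk above; the program itself is illustrative, not kernel code):

#include <stdio.h>

/* Values as defined in include/linux/futex.h above. */
#define FUTEX_WAIT_BITSET	9
#define FUTEX_PRIVATE_FLAG	128
#define FUTEX_CLOCK_REALTIME	256
#define FUTEX_CMD_MASK		~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

int main(void)
{
	/* A private, CLOCK_REALTIME-timed wait: 9 | 128 | 256 = 393. */
	int op = FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME;

	/* 393 & ~(128 | 256) = 9, so the command still decodes as
	 * FUTEX_WAIT_BITSET. With the old one-flag mask it would have
	 * decoded as 393 & ~128 = 265, an unknown command. */
	printf("cmd=%d clockrt=%d\n", op & FUTEX_CMD_MASK,
	       !!(op & FUTEX_CLOCK_REALTIME));
	return 0;
}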

kernel/futex.c

@@ -92,11 +92,12 @@ struct futex_pi_state {
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
  * The order of wakeup is always to make the first condition true, then
- * wake up q->waiters, then make the second condition true.
+ * wake up q->waiter, then make the second condition true.
  */
 struct futex_q {
 	struct plist_node list;
-	wait_queue_head_t waiters;
+	/* There can only be a single waiter */
+	wait_queue_head_t waiter;
 
 	/* Which hash list lock to use: */
 	spinlock_t *lock_ptr;
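
The two-condition "woken" predicate in the comment above is open-coded throughout this file; as a hypothetical helper (not part of the patch), the test it describes reads:

/*
 * Illustration only: a futex_q counts as woken once the waker has
 * unlinked it from the hash bucket's plist (condition one), or has
 * additionally cleared lock_ptr (condition two). Waiters may poll
 * the first condition without holding any lock.
 */
static inline int futex_q_woken(struct futex_q *q)
{
	return plist_node_empty(&q->list) || q->lock_ptr == NULL;
}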
@@ -573,7 +574,7 @@ static void wake_futex(struct futex_q *q)
 	 * The lock in wake_up_all() is a crucial memory barrier after the
 	 * plist_del() and also before assigning to q->lock_ptr.
 	 */
-	wake_up_all(&q->waiters);
+	wake_up(&q->waiter);
 	/*
 	 * The waiting task can free the futex_q as soon as this is written,
 	 * without taking any locks. This must come last.
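
For orientation, the whole waker-side sequence these comments describe is roughly the following (a condensed sketch of wake_futex() after this change, not a verbatim copy):

static void wake_futex(struct futex_q *q)
{
	/* Step 1: unlink from the hash bucket, making
	 * plist_node_empty(&q->list) true. */
	plist_del(&q->list, &q->list.plist);
	/* Step 2: wake the single waiter; the waitqueue lock taken
	 * inside wake_up() is the memory barrier between steps 1 and 3. */
	wake_up(&q->waiter);
	/* Step 3: the last store; once the waiter sees this it may
	 * free the futex_q without taking any locks. */
	q->lock_ptr = NULL;
}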
@@ -930,7 +931,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 {
 	struct futex_hash_bucket *hb;
 
-	init_waitqueue_head(&q->waiters);
+	init_waitqueue_head(&q->waiter);
 
 	get_futex_key_refs(&q->key);
 	hb = hash_futex(&q->key);
@@ -1142,12 +1143,13 @@ handle_fault:
  * In case we must use restart_block to restart a futex_wait,
  * we encode in the 'flags' shared capability
  */
-#define FLAGS_SHARED 1
+#define FLAGS_SHARED	0x01
+#define FLAGS_CLOCKRT	0x02
 
 static long futex_wait_restart(struct restart_block *restart);
 
 static int futex_wait(u32 __user *uaddr, int fshared,
-		      u32 val, ktime_t *abs_time, u32 bitset)
+		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
 	struct task_struct *curr = current;
 	DECLARE_WAITQUEUE(wait, curr);
@@ -1220,7 +1222,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 
 	/* add_wait_queue is the barrier after __set_current_state. */
 	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(&q.waiters, &wait);
+	add_wait_queue(&q.waiter, &wait);
 	/*
 	 * !plist_node_empty() is safe here without any lock.
 	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
@@ -1233,8 +1235,10 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		slack = current->timer_slack_ns;
 		if (rt_task(current))
 			slack = 0;
-		hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
-				      HRTIMER_MODE_ABS);
+		hrtimer_init_on_stack(&t.timer,
+				      clockrt ? CLOCK_REALTIME :
+				      CLOCK_MONOTONIC,
+				      HRTIMER_MODE_ABS);
 		hrtimer_init_sleeper(&t, current);
 		hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
@@ -1289,6 +1293,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 
 		if (fshared)
 			restart->futex.flags |= FLAGS_SHARED;
+		if (clockrt)
+			restart->futex.flags |= FLAGS_CLOCKRT;
 		return -ERESTART_RESTARTBLOCK;
 	}
@@ -1312,7 +1318,8 @@ static long futex_wait_restart(struct restart_block *restart)
 	if (restart->futex.flags & FLAGS_SHARED)
 		fshared = 1;
 	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
-				restart->futex.bitset);
+				restart->futex.bitset,
+				restart->futex.flags & FLAGS_CLOCKRT);
 }
@@ -1558,12 +1565,11 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 
 uaddr_faulted:
 	/*
-	 * We have to r/w *(int __user *)uaddr, but we can't modify it
-	 * non-atomically. Therefore, if get_user below is not
-	 * enough, we need to handle the fault ourselves, while
-	 * still holding the mmap_sem.
-	 *
-	 * ... and hb->lock. :-) --ANK
+	 * We have to r/w *(int __user *)uaddr, and we have to modify it
+	 * atomically. Therefore, if we continue to fault after get_user()
+	 * below, we need to handle the fault ourselves, while still holding
+	 * the mmap_sem. This can occur if the uaddr is under contention as
+	 * we have to drop the mmap_sem in order to call get_user().
 	 */
 	queue_unlock(&q, hb);
@@ -1575,7 +1581,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	}
 
 	ret = get_user(uval, uaddr);
-	if (!ret && (uval != -EFAULT))
+	if (!ret)
 		goto retry;
 
 	if (to)
@@ -1669,12 +1675,11 @@ out:
 
 pi_faulted:
 	/*
-	 * We have to r/w *(int __user *)uaddr, but we can't modify it
-	 * non-atomically. Therefore, if get_user below is not
-	 * enough, we need to handle the fault ourselves, while
-	 * still holding the mmap_sem.
-	 *
-	 * ... and hb->lock. --ANK
+	 * We have to r/w *(int __user *)uaddr, and we have to modify it
+	 * atomically. Therefore, if we continue to fault after get_user()
+	 * below, we need to handle the fault ourselves, while still holding
+	 * the mmap_sem. This can occur if the uaddr is under contention as
+	 * we have to drop the mmap_sem in order to call get_user().
 	 */
 	spin_unlock(&hb->lock);
@@ -1687,7 +1692,7 @@ pi_faulted:
 	}
 
 	ret = get_user(uval, uaddr);
-	if (!ret && (uval != -EFAULT))
+	if (!ret)
 		goto retry;
 
 	return ret;
@@ -1905,18 +1910,22 @@ void exit_robust_list(struct task_struct *curr)
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 		u32 __user *uaddr2, u32 val2, u32 val3)
 {
-	int ret = -ENOSYS;
+	int clockrt, ret = -ENOSYS;
 	int cmd = op & FUTEX_CMD_MASK;
 	int fshared = 0;
 
 	if (!(op & FUTEX_PRIVATE_FLAG))
 		fshared = 1;
 
+	clockrt = op & FUTEX_CLOCK_REALTIME;
+	if (clockrt && cmd != FUTEX_WAIT_BITSET)
+		return -ENOSYS;
+
 	switch (cmd) {
 	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
 	case FUTEX_WAIT_BITSET:
-		ret = futex_wait(uaddr, fshared, val, timeout, val3);
+		ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
 		break;
 	case FUTEX_WAKE:
 		val3 = FUTEX_BITSET_MATCH_ANY;
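
Note that do_futex() accepts FUTEX_CLOCK_REALTIME only for FUTEX_WAIT_BITSET: plain FUTEX_WAIT takes a relative timeout, so selecting the realtime clock only composes with the absolute timeouts of the bitset variant. A hedged userspace sketch of the new mode, assuming the raw syscall(2) interface (the fallback defines cover headers that predate this commit):

#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef FUTEX_CLOCK_REALTIME
#define FUTEX_CLOCK_REALTIME	256
#endif
#ifndef FUTEX_BITSET_MATCH_ANY
#define FUTEX_BITSET_MATCH_ANY	0xffffffff
#endif

static int futex_word = 1;	/* wait while the word still holds 1 */

int main(void)
{
	struct timespec deadline;

	/* FUTEX_WAIT_BITSET interprets the timeout as absolute, so an
	 * "until wall-clock now + 2s" deadline is expressed directly. */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;

	/* Blocks while futex_word == 1; with no waker this returns -1
	 * with errno == ETIMEDOUT once the deadline passes. */
	long ret = syscall(SYS_futex, &futex_word,
			   FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
			   1,		/* expected value of the futex word */
			   &deadline,	/* absolute CLOCK_REALTIME deadline */
			   NULL,	/* uaddr2: unused for this op */
			   FUTEX_BITSET_MATCH_ANY);
	if (ret == -1)
		perror("futex");
	return 0;
}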