robust futex thread exit race
Calling handle_futex_death in exit_robust_list for the robust mutexes of a dying thread effectively frees each mutex. Another thread may grab the lock immediately, and the new owner then updates the next pointer of the mutex. A fetch_robust_entry through that next pointer can therefore branch into the robust mutex list of a different thread. This causes two problems: 1) some mutexes held by the dead thread are not freed, and 2) some mutexes held by a different thread are freed. The next pointer must be read before calling handle_futex_death.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9f96cb1e8b (parent 8792f961ba)
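The fix reduces to one linked-list traversal rule: if releasing a node lets another thread rewrite that node's next pointer, the walker must load next before the release. Below is a minimal user-space sketch of that pattern, assuming simplified types; walk_robust_list(), release_lock(), and struct robust_node are hypothetical stand-ins for exit_robust_list(), handle_futex_death(), and the kernel's robust list entries, with none of the actual futex or cross-thread machinery.

#include <stdio.h>

struct robust_node {
	struct robust_node *next;
	int lock_word;		/* futex word the dying thread may hold */
};

/*
 * Stand-in for handle_futex_death(): clears the lock word. In the real
 * kernel, a waiter can acquire the futex the moment this completes and
 * relink node->next into its own robust list.
 */
static void release_lock(struct robust_node *node)
{
	node->lock_word = 0;
}

static void walk_robust_list(struct robust_node *head)
{
	struct robust_node *entry = head->next;
	struct robust_node *next_entry;

	while (entry != head) {
		/*
		 * Read the next pointer BEFORE releasing the lock;
		 * reading it afterwards could follow a pointer that a
		 * new owner has already redirected into another list.
		 */
		next_entry = entry->next;
		release_lock(entry);
		entry = next_entry;
	}
}

int main(void)
{
	struct robust_node head, a, b;

	/* circular list: head -> a -> b -> head */
	head.next = &a;
	a = (struct robust_node){ .next = &b, .lock_word = 1 };
	b = (struct robust_node){ .next = &head, .lock_word = 1 };

	walk_robust_list(&head);
	printf("a=%d b=%d\n", a.lock_word, b.lock_word);	/* a=0 b=0 */
	return 0;
}

The patch applies the same reasoning to the list_op_pending entry, deferring its handle_futex_death call until after the walk, presumably because a pending lock may itself already be on the list, and releasing it first could hand a list entry to a new owner mid-traversal.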
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
 void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
 	unsigned long futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -1965,11 +1966,13 @@ void exit_robust_list(struct task_struct *curr)
 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
 		return;
 
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset,
-				   curr, pip);
-
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != &head->list) {
+		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
 		/*
 		 * A pending lock might already be on the list, so
 		 * don't process it twice:
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr)
 			if (handle_futex_death((void __user *)entry + futex_offset,
 					       curr, pi))
 				return;
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&entry, &entry->next, &pi))
+		if (rc)
 			return;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr)
 
 		cond_resched();
 	}
+
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
 void compat_exit_robust_list(struct task_struct *curr)
 {
 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-	compat_uptr_t uentry, upending;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	compat_uptr_t uentry, next_uentry, upending;
 	compat_long_t futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -61,10 +62,15 @@ void compat_exit_robust_list(struct task_struct *curr)
 	if (fetch_robust_entry(&upending, &pending,
 			       &head->list_op_pending, &pip))
 		return;
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != (struct robust_list __user *) &head->list) {
+		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_uentry, &next_entry,
+			(compat_uptr_t __user *)&entry->next, &next_pi);
 		/*
 		 * A pending lock might already be on the list, so
 		 * dont process it twice:
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
 					curr, pi))
 				return;
 
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&uentry, &entry,
-				(compat_uptr_t __user *)&entry->next, &pi))
+		if (rc)
 			return;
+		uentry = next_uentry;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
 
 		cond_resched();
 	}
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 asmlinkage long