commit 7e1a2766e6
Merge branch 'core/urgent' into core/rcu

Merge reason: Pick up RCU fixlet to base further commits on.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1029,7 +1029,6 @@ static inline
 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 			   struct futex_hash_bucket *hb)
 {
-	drop_futex_key_refs(&q->key);
 	get_futex_key_refs(key);
 	q->key = *key;
 
@@ -1227,6 +1226,7 @@ retry_private:
 				 */
 				if (ret == 1) {
 					WARN_ON(pi_state);
+					drop_count++;
 					task_count++;
 					ret = get_futex_value_locked(&curval2, uaddr2);
 					if (!ret)
@@ -1305,6 +1305,7 @@ retry_private:
 			if (ret == 1) {
 				/* We got the lock. */
 				requeue_pi_wake_futex(this, &key2, hb2);
+				drop_count++;
 				continue;
 			} else if (ret) {
 				/* -EDEADLK */
@@ -2126,7 +2127,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 		plist_del(&q->list, &q->list.plist);
 
 		/* Handle spurious wakeups gracefully */
-		ret = -EAGAIN;
+		ret = -EWOULDBLOCK;
 		if (timeout && !timeout->task)
 			ret = -ETIMEDOUT;
 		else if (signal_pending(current))
@@ -2207,7 +2208,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
 	debug_rt_mutex_init_waiter(&rt_waiter);
-	rt_waiter.task = NULL;
 
 retry:
 	key2 = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
 	if (unlikely(ret != 0))
@@ -2302,9 +2302,6 @@ out_put_keys:
 out_key2:
 	put_futex_key(fshared, &key2);
 
-	/* Spurious wakeup ? */
-	if (ret == -EAGAIN)
-		goto retry;
 out:
 	if (to) {
 		hrtimer_cancel(&to->timer);
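The futex hunks above rebalance futex-key reference counting: requeue_pi_wake_futex() no longer drops the waiter's reference on the source key itself, so both call sites in futex_requeue() now bump drop_count, which later drives one drop_futex_key_refs(&key1) per requeued waiter. A minimal userspace sketch of that ownership-transfer pattern follows; all names in it are illustrative stand-ins, not kernel code:

	#include <assert.h>
	#include <stdio.h>

	/* Toy stand-in for a futex key: just a reference counter. */
	struct key { int refs; };

	static void get_ref(struct key *k)  { k->refs++; }
	static void drop_ref(struct key *k) { assert(k->refs > 0); k->refs--; }

	/* Analogous to requeue_pi_wake_futex() after the fix: take a
	 * reference on the destination key only; the caller remains
	 * responsible for releasing the source key's reference. */
	static void requeue_one(struct key *dst)
	{
		get_ref(dst);
	}

	int main(void)
	{
		struct key key1 = { .refs = 2 };	/* two waiters on key1 */
		struct key key2 = { .refs = 0 };
		int drop_count = 0;

		for (int i = 0; i < 2; i++) {
			requeue_one(&key2);
			drop_count++;	/* the fix: account for the source ref */
		}
		while (--drop_count >= 0)
			drop_ref(&key1);	/* mirrors drop_futex_key_refs(&key1) */

		/* Balanced: key1 fully released, key2 holds one ref per waiter. */
		printf("key1.refs=%d key2.refs=%d\n", key1.refs, key2.refs);
		return 0;
	}

The invariant being restored is that every waiter moved off key1 eventually costs exactly one reference on key1 and gains exactly one on key2.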
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -61,7 +61,7 @@ static struct lock_class_key rcu_root_class;
 	NUM_RCU_LVL_2, \
 	NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -659,14 +659,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * irqs disabled.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
-		spin_lock(&rnp->lock);	/* irqs already disabled. */
+		spin_lock(&rnp->lock);		/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);		/* irqs already disabled. */
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -708,6 +711,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -915,7 +919,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+		/*
+		 * If there was a task blocking the current grace period,
+		 * and if all CPUs have checked in, we need to propagate
+		 * the quiescent state up the rcu_node hierarchy.  But that
+		 * is inconvenient at the moment due to deadlock issues if
+		 * this should end the current grace period.  So set the
+		 * offlined CPU's bit in ->qsmask in order to force the
+		 * next force_quiescent_state() invocation to clean up this
+		 * mess in a deadlock-free manner.
+		 */
+		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+			rnp->qsmask |= mask;
+
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		rnp = rnp->parent;
@@ -1151,9 +1168,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
 
@@ -1167,7 +1185,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 	/* Update state, record completion counter. */
 	spin_lock(&rnp->lock);
-	if (lastcomp == rsp->completed) {
+	if (lastcomp == rsp->completed &&
+	    rsp->signaled == RCU_SAVE_DYNTICK) {
 		rsp->signaled = RCU_FORCE_QS;
 		dyntick_record_completed(rsp, lastcomp);
 	}
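The rcutree.c hunks close a race between forcing and initializing a grace period: rsp->signaled now leaves the new idle state only after every rcu_node has been initialized, the transition happens under the root node's lock, and force_quiescent_state() re-validates the state before advancing RCU_SAVE_DYNTICK to RCU_FORCE_QS. A compressed userspace sketch of that check-then-recheck-under-lock discipline follows; a pthread mutex stands in for the kernel's spinlocks and every name is illustrative:

	#include <pthread.h>
	#include <stdio.h>

	enum gp_state { GP_IDLE, GP_INIT, SAVE_DYNTICK, FORCE_QS };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static enum gp_state signaled = GP_IDLE;
	static long completed = -300;

	/* Mirrors the fixed force_quiescent_state() flow: sample state,
	 * do work outside the lock, then re-check both the completion
	 * counter and the state before advancing it.  Without the state
	 * re-check, a forcing pass racing with re-initialization could
	 * advance signaled on behalf of the wrong grace period. */
	static void try_advance(long lastcomp)
	{
		pthread_mutex_lock(&lock);
		if (lastcomp == completed && signaled == SAVE_DYNTICK) {
			signaled = FORCE_QS;
			printf("advanced to FORCE_QS for gp %ld\n", lastcomp);
		}
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		signaled = SAVE_DYNTICK;	/* as if rcu_start_gp() finished */
		try_advance(-300);		/* counter and state match: advances */
		try_advance(-299);		/* stale counter: refused */
		return 0;
	}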
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -201,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT		0	/* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
-#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
+#define RCU_GP_IDLE		0	/* No grace period in progress. */
+#define RCU_GP_INIT		1	/* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
+#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */
@@ -306,9 +307,9 @@ static void rcu_print_task_stall(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp);
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp);
 static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(int cpu);
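As a reading aid, the renumbered constants form a small state machine for rsp->signaled. One grace-period cycle, on my reading of the surrounding hunks and assuming CONFIG_NO_HZ (so RCU_SIGNAL_INIT equals RCU_SAVE_DYNTICK), proceeds as modeled in this standalone sketch:

	#include <stdio.h>

	/* Values mirror the new defines in rcutree.h. */
	enum signaled { RCU_GP_IDLE, RCU_GP_INIT, RCU_SAVE_DYNTICK, RCU_FORCE_QS };

	static const char *name[] = {
		"RCU_GP_IDLE", "RCU_GP_INIT", "RCU_SAVE_DYNTICK", "RCU_FORCE_QS",
	};

	int main(void)
	{
		enum signaled s = RCU_GP_IDLE;	/* no grace period in progress */

		printf("%s\n", name[s]);
		s = RCU_SAVE_DYNTICK;	/* rcu_start_gp(): hierarchy initialized */
		printf("%s\n", name[s]);
		s = RCU_FORCE_QS;	/* force_quiescent_state(): dynticks scanned */
		printf("%s\n", name[s]);
		s = RCU_GP_IDLE;	/* cpu_quiet_msk_finish(): grace period ends */
		printf("%s\n", name[s]);
		return 0;
	}

Before this change there was no idle value at all, which is why force_quiescent_state() could not distinguish "no grace period" from "grace period being initialized".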
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -304,21 +304,25 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  * parent is to remove the need for rcu_read_unlock_special() to
  * make more than two attempts to acquire the target rcu_node's lock.
  *
+ * Returns 1 if there was previously a task blocking the current grace
+ * period on the specified rcu_node structure.
+ *
  * The caller must hold rnp->lock with irqs disabled.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp)
 {
 	int i;
 	struct list_head *lp;
 	struct list_head *lp_root;
+	int retval = rcu_preempted_readers(rnp);
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	struct task_struct *tp;
 
 	if (rnp == rnp_root) {
 		WARN_ONCE(1, "Last CPU thought to be offlined?");
-		return;  /* Shouldn't happen: at least one CPU online. */
+		return 0;  /* Shouldn't happen: at least one CPU online. */
 	}
 	WARN_ON_ONCE(rnp != rdp->mynode &&
 		     (!list_empty(&rnp->blocked_tasks[0]) ||
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 			spin_unlock(&rnp_root->lock); /* irqs remain disabled */
 		}
 	}
+
+	return retval;
 }
 
 /*
@@ -532,12 +538,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 
 /*
  * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp,
-				      struct rcu_data *rdp)
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				     struct rcu_node *rnp,
+				     struct rcu_data *rdp)
 {
+	return 0;
 }
 
 /*
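These plugin hunks change rcu_preempt_offline_tasks() from void to int so the caller in __rcu_offline_cpu() can learn whether a blocked task was holding up the current grace period; the stub for kernels without preemptible RCU returns 0, making the caller's fixup branch statically dead. A sketch of that stub-versus-implementation pattern; the macro and function names here are stand-ins, not the kernel's:

	#include <stdio.h>

	/* With preemptible RCU, report whether a blocked reader was
	 * holding up the current grace period; without it, no such
	 * readers exist, so always report 0. */
	#ifdef TREE_PREEMPT_RCU
	static int offline_tasks_were_blocking_gp(void)
	{
		return 1;	/* pretend a blocked reader was found */
	}
	#else
	static int offline_tasks_were_blocking_gp(void)
	{
		return 0;	/* no preemptible readers can block a GP */
	}
	#endif

	int main(void)
	{
		unsigned long qsmask = 0;
		unsigned long mask = 0x1;	/* the offlined CPU's bit */

		/* Mirrors the new caller in __rcu_offline_cpu(). */
		if (offline_tasks_were_blocking_gp() && !qsmask)
			qsmask |= mask;
		printf("qsmask = %#lx\n", qsmask);
		return 0;
	}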
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-	spin_unlock_irqrestore(&uidhash_lock, flags);
 	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
 	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 }
 
 #else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */
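The kernel/user.c hunk is an ordering fix: free_user() previously dropped uidhash_lock before arming the deferred cleanup work, leaving a window in which a concurrent lookup of the user_struct could race with teardown; the work is now scheduled with the lock still held. A standalone sketch of the same rule, commit to teardown while still holding the lock that lookups take; the pthread names are mine, not the kernel API:

	#include <pthread.h>
	#include <stdio.h>

	/* A findable, refcounted object protected by a lookup lock. */
	struct object {
		int refs;
		int dying;	/* teardown committed */
	};

	static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct object obj = { .refs = 0, .dying = 0 };

	/* Lookup succeeds only while the object is not being torn down. */
	static struct object *lookup(void)
	{
		struct object *p = NULL;

		pthread_mutex_lock(&lookup_lock);
		if (!obj.dying) {
			obj.refs++;
			p = &obj;
		}
		pthread_mutex_unlock(&lookup_lock);
		return p;
	}

	/* Mirrors the fixed free_user(): commit to (deferred) teardown
	 * before releasing the lock that lookups take, so no lookup can
	 * slip in between "last ref dropped" and "cleanup armed". */
	static void release(void)
	{
		pthread_mutex_lock(&lookup_lock);
		obj.dying = 1;	/* stands in for scheduling the cleanup work */
		pthread_mutex_unlock(&lookup_lock);
	}

	int main(void)
	{
		release();
		printf("lookup after release: %p\n", (void *)lookup());
		return 0;
	}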