sched/headers: Move signal wakeup & sigpending methods from <linux/sched.h> into <linux/sched/signal.h>
This reduces the size of <linux/sched.h>.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9a07000400
commit 2a1f062a4a
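The user-visible effect of the split is on include lines: code that calls these helpers now has to include <linux/sched/signal.h> itself instead of getting them for free via <linux/sched.h>. A minimal sketch of an affected caller (hypothetical kernel-thread code, not part of this commit):

/*
 * Hypothetical caller, not part of this commit: a kernel thread that
 * wants to notice SIGKILL. After this split it must include
 * <linux/sched/signal.h> itself; the helpers no longer come in via
 * <linux/sched.h>.
 */
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>	/* signal_pending(), allow_signal(), ... */

static int example_thread(void *unused)
{
	allow_signal(SIGKILL);			/* kthreads ignore signals by default */

	while (!kthread_should_stop()) {
		if (signal_pending(current))	/* SIGKILL has been queued */
			break;
		msleep_interruptible(100);	/* returns early if a signal arrives */
	}
	return 0;
}

Note that <linux/sched/signal.h> itself includes <linux/sched.h> (visible in the include block further down), so converted callers still see the full task_struct definition.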
include/linux/sched.h

@@ -1931,37 +1931,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
@@ -2028,26 +1997,6 @@ static __always_inline bool need_resched(void)
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
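The comment block above spells out the contract for recalc_sigpending(): it must run, with sighand->siglock held, every time a task's blocked set changes. A hedged sketch of that pattern (illustrative only, modeled on how the blocked set is typically manipulated; not part of this commit):

/*
 * Illustrative only, not part of this commit: blocking a signal for the
 * current task. Whenever ->blocked changes, recalc_sigpending() must be
 * called under sighand->siglock so TIF_SIGPENDING stays in sync with
 * the new mask, as the comment above requires.
 */
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/spinlock.h>

static void example_block_signal(int sig)
{
	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

In-tree code usually goes through helpers such as set_current_blocked() rather than open-coding this sequence.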
include/linux/sched/signal.h

@@ -3,10 +3,10 @@

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
@@ -271,6 +271,57 @@ extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
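With the helpers now exported from this header, a typical consumer looks like the following (hypothetical driver snippet, not part of the commit). A killable sleep reacts only to fatal signals, which is the TASK_WAKEKILL distinction signal_pending_state() encodes above:

/*
 * Hypothetical driver snippet, not from this commit: sleep until a
 * device becomes ready, but let a fatal signal (e.g. SIGKILL) abort
 * the wait while ordinary handled signals are ignored.
 */
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

static int example_wait_ready(wait_queue_head_t *wq, bool *ready)
{
	/* Sleeps in TASK_KILLABLE; only a fatal pending signal wakes it
	 * early, matching the TASK_WAKEKILL case in signal_pending_state().
	 */
	if (wait_event_killable(*wq, *ready))
		return -ERESTARTSYS;
	return 0;
}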