tile: various minor cleanups to hardwall subsystem
First, clean up active hardwalls in exit_thread(). This is a better place
than in arch_release_thread_info(). Second, mask out any non-online cpus
from the cpumask after validating any required semantics of the cpu set.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 8e42e211e4
commit 7d937719e3
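The second change amounts to intersecting the user-requested CPU set with cpu_online_mask inside hardwall_create(); the actual kernel call appears in the first hunk below. As a rough userspace sketch of the same idea (assumptions: glibc's cpu_set_t macros stand in for the kernel's struct cpumask, the helper name mask_to_online() is invented for illustration, and online CPUs are taken to be numbered 0..N-1):

/*
 * Sketch only: drop any requested CPU that is not currently online,
 * analogous to cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask).
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static void mask_to_online(cpu_set_t *requested)
{
	cpu_set_t online;
	long i, nr_online = sysconf(_SC_NPROCESSORS_ONLN);

	/* Assume online CPUs are numbered 0..nr_online-1. */
	CPU_ZERO(&online);
	for (i = 0; i < nr_online; i++)
		CPU_SET(i, &online);

	/* Keep only the requested CPUs that are also online. */
	CPU_AND(requested, requested, &online);
}

int main(void)
{
	cpu_set_t req;

	CPU_ZERO(&req);
	CPU_SET(0, &req);
	CPU_SET(127, &req);	/* may not exist on this machine */

	mask_to_online(&req);
	printf("cpus remaining in requested set: %d\n", CPU_COUNT(&req));
	return 0;
}

In the kernel patch itself, the masking runs only after the required shape of the cpu set has been validated, as the commit message notes.
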
arch/tile/kernel/hardwall.c

@@ -540,6 +540,14 @@ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
 	}
 	}
 
+	/*
+	 * Eliminate cpus that are not part of this Linux client.
+	 * Note that this allows for configurations that we might not want to
+	 * support, such as one client on every even cpu, another client on
+	 * every odd cpu.
+	 */
+	cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);
+
 	/* Confirm it doesn't overlap and add it to the list. */
 	spin_lock_irqsave(&hwt->lock, flags);
 	list_for_each_entry(iter, &hwt->list, list) {

@@ -612,7 +620,7 @@ static int hardwall_activate(struct hardwall_info *info)
 
 /*
  * Deactivate a task's hardwall. Must hold lock for hardwall_type.
- * This method may be called from free_task(), so we don't want to
+ * This method may be called from exit_thread(), so we don't want to
  * rely on too many fields of struct task_struct still being valid.
  * We assume the cpus_allowed, pid, and comm fields are still valid.
  */

arch/tile/kernel/process.c

@@ -74,19 +74,6 @@ void arch_release_thread_info(struct thread_info *info)
 {
 	struct single_step_state *step_state = info->step_state;
 
-#ifdef CONFIG_HARDWALL
-	/*
-	 * We free a thread_info from the context of the task that has
-	 * been scheduled next, so the original task is already dead.
-	 * Calling deactivate here just frees up the data structures.
-	 * If the task we're freeing held the last reference to a
-	 * hardwall fd, it would have been released prior to this point
-	 * anyway via exit_files(), and the hardwall_task.info pointers
-	 * would be NULL by now.
-	 */
-	hardwall_deactivate_all(info->task);
-#endif
-
 	if (step_state) {
 
 		/*

@@ -564,7 +551,15 @@ void flush_thread(void)
  */
 void exit_thread(void)
 {
-	/* Nothing */
+#ifdef CONFIG_HARDWALL
+	/*
+	 * Remove the task from the list of tasks that are associated
+	 * with any live hardwalls. (If the task that is exiting held
+	 * the last reference to a hardwall fd, it would already have
+	 * been released and deactivated at this point.)
+	 */
+	hardwall_deactivate_all(current);
+#endif
 }
 
 void show_regs(struct pt_regs *regs)