Merge branch 'wq/for-3.14-fixes' into wq/for-3.15

To receive 70044d71d3 ("firewire: don't use PREPARE_DELAYED_WORK").
There will be further related updates in the for-3.15 branch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Author: Tejun Heo <tj@kernel.org>
Date:   2014-03-07 10:20:20 -05:00
commit 7104ce9b34
5 changed files with 37 additions and 15 deletions
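
All three firewire changes in this merge follow the same shape: the old PREPARE_DELAYED_WORK() call, which re-pointed an already initialized work item at a different handler, is replaced by a work_func_t field in the owning object plus a single dispatcher registered once with INIT_DELAYED_WORK(). The sketch below is a plain-C, userspace illustration of that shape only; the struct and function names (work_item, my_device, and so on) are made up for the example and are not the kernel API.

#include <stddef.h>
#include <stdio.h>

struct work_item;
typedef void (*work_func)(struct work_item *);

struct work_item {
	work_func fn;			/* set once at init time, never re-pointed */
};

struct my_device {
	work_func workfn;		/* updated instead of the work item itself */
	struct work_item work;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The one registered handler: look up and call the per-object workfn. */
static void my_device_dispatch(struct work_item *w)
{
	struct my_device *dev = container_of(w, struct my_device, work);

	dev->workfn(w);
}

static void my_device_init(struct work_item *w)   { (void)w; puts("device init"); }
static void my_device_update(struct work_item *w) { (void)w; puts("device update"); }

int main(void)
{
	struct my_device dev = { .work = { .fn = my_device_dispatch } };

	dev.workfn = my_device_init;	/* was: PREPARE_DELAYED_WORK(&work, init) */
	dev.work.fn(&dev.work);		/* stands in for the queued work running  */

	dev.workfn = my_device_update;	/* handler change is now a plain store    */
	dev.work.fn(&dev.work);
	return 0;
}

The point of the conversion is that switching handlers becomes a plain pointer assignment on the object, so the work item itself never has to be re-initialized while it may still be queued.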

drivers/firewire/core-device.c

@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
 		old->config_rom_retries = 0;
 		fw_notice(card, "rediscovered device %s\n", dev_name(dev));
 
-		PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+		old->workfn = fw_device_update;
 		fw_schedule_device_work(old, 0);
 
 		if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
 	if (atomic_cmpxchg(&device->state,
 			   FW_DEVICE_INITIALIZING,
 			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+		device->workfn = fw_device_shutdown;
 		fw_schedule_device_work(device, SHUTDOWN_DELAY);
 	} else {
 		fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
 		  dev_name(&device->device), fw_rcode_string(ret));
  gone:
 	atomic_set(&device->state, FW_DEVICE_GONE);
-	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+	device->workfn = fw_device_shutdown;
 	fw_schedule_device_work(device, SHUTDOWN_DELAY);
  out:
 	if (node_id == card->root_node->node_id)
 		fw_schedule_bm_work(card, 0);
 }
 
+static void fw_device_workfn(struct work_struct *work)
+{
+	struct fw_device *device = container_of(to_delayed_work(work),
+						struct fw_device, work);
+	device->workfn(work);
+}
+
 void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 {
 	struct fw_device *device;
@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		 * power-up after getting plugged in. We schedule the
 		 * first config rom scan half a second after bus reset.
 		 */
-		INIT_DELAYED_WORK(&device->work, fw_device_init);
+		device->workfn = fw_device_init;
+		INIT_DELAYED_WORK(&device->work, fw_device_workfn);
 		fw_schedule_device_work(device, INITIAL_DELAY);
 		break;
 
@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		if (atomic_cmpxchg(&device->state,
 			    FW_DEVICE_RUNNING,
 			    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
-			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+			device->workfn = fw_device_refresh;
 			fw_schedule_device_work(device,
 				device->is_local ? 0 : INITIAL_DELAY);
 		}
@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		smp_wmb(); /* update node_id before generation */
 		device->generation = card->generation;
 		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
-			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+			device->workfn = fw_device_update;
 			fw_schedule_device_work(device, 0);
 		}
 		break;
@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		device = node->data;
 		if (atomic_xchg(&device->state,
 				FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
-			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+			device->workfn = fw_device_shutdown;
 			fw_schedule_device_work(device,
 				list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
 		}

drivers/firewire/sbp2.c

@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
 	 */
 	int generation;
 	int retries;
+	work_func_t workfn;
 	struct delayed_work work;
 	bool has_sdev;
 	bool blocked;
@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
 	/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
 	sbp2_set_busy_timeout(lu);
 
-	PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+	lu->workfn = sbp2_reconnect;
 	sbp2_agent_reset(lu);
 
 	/* This was a re-login. */
@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
 	 * If a bus reset happened, sbp2_update will have requeued
 	 * lu->work already. Reset the work from reconnect to login.
 	 */
-	PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+	lu->workfn = sbp2_login;
 }
 
 static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
 		    lu->retries++ >= 5) {
 			dev_err(tgt_dev(tgt), "failed to reconnect\n");
 			lu->retries = 0;
-			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+			lu->workfn = sbp2_login;
 		}
 		sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
 
@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
 	sbp2_conditionally_unblock(lu);
 }
 
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+	struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+						    struct sbp2_logical_unit, work);
+	lu->workfn(work);
+}
+
 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
 {
 	struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
 	lu->blocked = false;
 	++tgt->dont_block;
 	INIT_LIST_HEAD(&lu->orb_list);
-	INIT_DELAYED_WORK(&lu->work, sbp2_login);
+	lu->workfn = sbp2_login;
+	INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
 	list_add_tail(&lu->link, &tgt->lu_list);
 
 	return 0;

include/linux/firewire.h

@@ -200,6 +200,7 @@ struct fw_device {
 	unsigned irmc:1;
 	unsigned bc_implemented:2;
 
+	work_func_t workfn;
 	struct delayed_work work;
 	struct fw_attribute_group attribute_group;
 };

include/linux/workqueue.h

@@ -419,10 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 	static struct lock_class_key __key;			\
 	const char *__lock_name;				\
 								\
-	if (__builtin_constant_p(fmt))				\
-		__lock_name = (fmt);				\
-	else							\
-		__lock_name = #fmt;				\
+	__lock_name = #fmt#args;				\
 								\
 	__alloc_workqueue_key((fmt), (flags), (max_active),	\
 			      &__key, __lock_name, ##args);	\
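
For reference, the new __lock_name in the hunk above is built purely by preprocessor stringification: #fmt and #args are pasted into one string literal, so the lockdep class name becomes the literal call-site text rather than a runtime-formatted name. A standalone sketch of that mechanism follows; LOCK_NAME and the "my_wq-%d" call site are illustrative, not the kernel macro or a real workqueue.

#include <stdio.h>

/* Same stringify-and-paste trick as the macro above, outside the kernel. */
#define LOCK_NAME(fmt, args...)	#fmt #args

int main(void)
{
	/* 'id' is only stringized, never evaluated, so it need not exist. */
	puts(LOCK_NAME("my_wq-%d", id));	/* prints: "my_wq-%d"id */
	return 0;
}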

kernel/workqueue.c

@@ -1851,6 +1851,12 @@ static void destroy_worker(struct worker *worker)
 	if (worker->flags & WORKER_IDLE)
 		pool->nr_idle--;
 
+	/*
+	 * Once WORKER_DIE is set, the kworker may destroy itself at any
+	 * point. Pin to ensure the task stays until we're done with it.
+	 */
+	get_task_struct(worker->task);
+
 	list_del_init(&worker->entry);
 	worker->flags |= WORKER_DIE;
 
@@ -1859,6 +1865,7 @@ static void destroy_worker(struct worker *worker)
 	spin_unlock_irq(&pool->lock);
 
 	kthread_stop(worker->task);
+	put_task_struct(worker->task);
 	kfree(worker);
 
 	spin_lock_irq(&pool->lock);
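
The destroy_worker() change above is a reference-pinning pattern: once WORKER_DIE is visible the worker may exit on its own, so the destroyer takes its own reference on the task before dropping pool->lock and releases it only after kthread_stop() returns. The sketch below is a minimal userspace analogue of that idea using an explicit refcount in place of get_task_struct()/put_task_struct(); struct task, get_task() and put_task() are made-up names for the example.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a task with a reference count. */
struct task {
	int refcount;
	const char *name;
};

static void get_task(struct task *t)
{
	t->refcount++;
}

static void put_task(struct task *t)
{
	if (--t->refcount == 0) {
		printf("freeing %s\n", t->name);
		free(t);
	}
}

int main(void)
{
	struct task *worker = malloc(sizeof(*worker));

	worker->refcount = 1;		/* the worker's own reference        */
	worker->name = "kworker/0:3";

	get_task(worker);		/* destroyer pins the task first     */
	put_task(worker);		/* worker exits, drops its reference */

	/* Still valid here only because of the pin taken above. */
	printf("stopping %s\n", worker->name);
	put_task(worker);		/* destroyer's final put frees it    */
	return 0;
}

Without the get before the worker's own put, the final printf would read freed memory; that is the same window destroy_worker() closes between dropping pool->lock and kthread_stop().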