Merge git://git.infradead.org/users/dhowells/workq-2.6
* git://git.infradead.org/users/dhowells/workq-2.6:
  Actually update the fixed up compile failures.
  WorkQueue: Fix up arch-specific work items where possible
  WorkStruct: make allyesconfig
  WorkStruct: Pass the work_struct pointer instead of context data
  WorkStruct: Merge the pending bit into the wq_data pointer
  WorkStruct: Typedef the work function prototype
  WorkStruct: Separate delayable and non-delayable events.
commit dd8856bda5
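Every hunk below applies the same WorkStruct conversion: work functions now receive the work_struct itself, and handlers recover their context with container_of() instead of a stashed void pointer. A minimal before/after sketch (hypothetical names, not taken from the diff):

	struct foo_dev {
		struct work_struct work;
		int pending;
	};

	/* old style: opaque context pointer, 3-argument INIT_WORK */
	static void foo_handler_old(void *data)
	{
		struct foo_dev *dev = data;
		dev->pending = 0;
	}
	/* INIT_WORK(&dev->work, foo_handler_old, dev); */

	/* new style: the work item itself is the handler argument */
	static void foo_handler_new(struct work_struct *work)
	{
		struct foo_dev *dev = container_of(work, struct foo_dev, work);
		dev->pending = 0;
	}
	/* INIT_WORK(&dev->work, foo_handler_new); */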
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
 static int sharpsl_fatal_check(void);
 static int sharpsl_average_value(int ad);
 static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
 
 
 /*
  * Variables
  */
 struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
 DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
 
 
@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
 EXPORT_SYMBOL(sharpsl_battery_kick);
 
 
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
 {
 	int voltage, percent, apm_status, i = 0;
 
@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
 	/* Corgi cannot confirm when battery fully charged so periodically kick! */
 	if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
 			&& time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 
 	while(1) {
 		voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
 	sharpsl_pm_led(SHARPSL_LED_OFF);
 	sharpsl_pm.charge_mode = CHRG_OFF;
 
-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
 	sharpsl_pm.charge_mode = CHRG_ERROR;
 }
 
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
 {
 	dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
 
@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
 	else if (sharpsl_pm.charge_mode == CHRG_ON)
 		sharpsl_charge_off();
 
-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 
@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
 		sharpsl_charge_off();
 	} else if (sharpsl_pm.full_count < 2) {
 		dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 	} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
 		dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 	} else {
 		sharpsl_charge_off();
 		sharpsl_pm.charge_mode = CHRG_DONE;
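The sharpsl hunks above also show the delayable/non-delayable split: items that are ever queued with a delay become delayed_work, and former schedule_work() calls on them become schedule_delayed_work() with a zero delay. A sketch of the same conversion (hypothetical names):

	static void bar_poll(struct work_struct *work);
	static DECLARE_DELAYED_WORK(bar_poll_work, bar_poll);

	static void bar_poll(struct work_struct *work)
	{
		/* re-arm with a real delay: poll again in one second */
		schedule_delayed_work(&bar_poll_work, HZ);
	}

	static void bar_kick(void)
	{
		/* "run now" on a delayed item: a zero delay replaces the
		 * old schedule_work() call */
		schedule_delayed_work(&bar_poll_work, 0);
	}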
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)
 
 	cancel_delayed_work(&irda_config->gpio_expa);
-	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+	PREPARE_DELAYED_WORK(&irda_config->gpio_expa, set_trans_mode);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);
 
 	return 0;
 }
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
 	.rows		= 8,
 	.cols		= 8,
 	.keymap		= nokia770_keymap,
-	.keymapsize	= ARRAY_SIZE(nokia770_keymap)
+	.keymapsize	= ARRAY_SIZE(nokia770_keymap),
 	.delay		= 4,
 };
 
@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
 		printk("HP connected\n");
 }
 
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
 {
 	down(&audio_pwr_sem);
 	if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
 	up(&audio_pwr_sem);
 }
 
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
 
 static void nokia770_audio_pwr_down(void)
 {
@@ -35,7 +35,7 @@ static u8 hw_led_state;
 
 static u8 tps_leds_change;
 
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
 {
 	for (;;) {
 		u8 leds;
@@ -61,7 +61,7 @@ static void tps_work(void *unused)
 	}
 }
 
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
 
 #ifdef CONFIG_OMAP_OSK_MISTRAL
 
@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)
 
 	cancel_delayed_work(&irda_config->gpio_expa);
-	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+	PREPARE_DELAYED_WORK(&irda_config->gpio_expa, set_trans_mode);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);
 
 	return 0;
 }
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;
 
 static int max7310_write(struct i2c_client *client, int address, int data);
 static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
 
 static struct device *akita_ioexp_device;
 static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
 
 
 /*
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
 EXPORT_SYMBOL(akita_set_ioexp);
 EXPORT_SYMBOL(akita_reset_ioexp);
 
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
 {
 	if (akita_ioexp_device)
 		max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
 	}
 }
 
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 {
 	on_each_cpu(mce_checkregs, NULL, 1, 1);
 	schedule_delayed_work(&mce_work, MCE_RATE);
@@ -1049,13 +1049,15 @@ void cpu_exit_clear(void)
 
 struct warm_boot_cpu_info {
 	struct completion *complete;
+	struct work_struct task;
 	int apicid;
 	int cpu;
 };
 
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
-	struct warm_boot_cpu_info *info = p;
+	struct warm_boot_cpu_info *info =
+		container_of(work, struct warm_boot_cpu_info, task);
 	do_boot_cpu(info->apicid, info->cpu);
 	complete(info->complete);
 }
@@ -1064,7 +1066,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct warm_boot_cpu_info info;
-	struct work_struct task;
 	int apicid, ret;
 	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
@@ -1089,7 +1090,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 	info.complete = &done;
 	info.apicid = apicid;
 	info.cpu = cpu;
-	INIT_WORK(&task, do_warm_boot_cpu, &info);
+	INIT_WORK(&info.task, do_warm_boot_cpu);
 
 	tsc_sync_disabled = 1;
 
@@ -1097,7 +1098,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
 			KERNEL_PGD_PTRS);
 	flush_tlb_all();
-	schedule_work(&task);
+	schedule_work(&info.task);
 	wait_for_completion(&done);
 
 	tsc_sync_disabled = 0;
@@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
 	unsigned int cpu;
 
@@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void)
 {
 	int ret;
 
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 	if (!ret)
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
 }
 #endif
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
 	printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
 	info->flags = sstate->flags;
 	info->xmit_fifo_size = sstate->xmit_fifo_size;
 	info->line = line;
-	INIT_WORK(&info->work, do_softint, info);
+	INIT_WORK(&info->work, do_softint);
 	info->state = sstate;
 	if (sstate->info) {
 		kfree(info);
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	monarch_cpu = -1;
 }
 
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 
 /*
  * ia64_mca_cmc_int_handler
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
 {
 	int timeout;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu	= cpu,
 		.done	= COMPLETION_INITIALIZER(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
 	c_idle.idle = get_idle_for_cpu(cpu);
 	if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
@@ -319,7 +319,7 @@ static void sp_cleanup(void)
 static int channel_open = 0;
 
 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
 	if (!channel_open) {
 		if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
 			return;
 		}
 
-		INIT_WORK(&work, sp_work, NULL);
+		INIT_WORK(&work, sp_work);
 		queue_work(workqueue, &work);
 	} else
 		queue_work(workqueue, &work);
@@ -14,7 +14,7 @@ static unsigned long avr_clock;
 
 static struct work_struct wd_work;
 
-static void wd_stop(void *unused)
+static void wd_stop(struct work_struct *unused)
 {
 	const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
 	int i = 0, rescue = 8;
@@ -122,7 +122,7 @@ static int __init ls_uarts_init(void)
 
 	ls_uart_init();
 
-	INIT_WORK(&wd_work, wd_stop, NULL);
+	INIT_WORK(&wd_work, wd_stop);
 	schedule_work(&wd_work);
 
 	return 0;
@@ -18,11 +18,11 @@
 
 #define OLD_BACKLIGHT_MAX 15
 
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
 
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
 
 /* Although these variables are used in interrupt context, it makes no sense to
  * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
 	return level;
 }
 
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
 	return error;
 }
 
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
@@ -37,8 +37,8 @@
 /* EEH event workqueue setup. */
 static DEFINE_SPINLOCK(eeh_eventlist_lock);
 LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
 
 /* Serialize reset sequences for a given pci device */
 DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
 * eeh_thread_launcher
 * @dummy - unused
 */
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
 {
 	if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
 		printk(KERN_ERR "Failed to start EEH daemon\n");
@@ -385,6 +385,7 @@ struct fcc_enet_private {
 	phy_info_t *phy;
 	struct work_struct phy_relink;
 	struct work_struct phy_display_config;
+	struct net_device *dev;
 
 	uint sequence_done;
 
@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = {
 	NULL
 };
 
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	volatile struct fcc_enet_private *fep = dev->priv;
+	volatile struct fcc_enet_private *fep =
+		container_of(work, struct fcc_enet_private, phy_relink);
+	struct net_device *dev = fep->dev;
 	uint s = fep->phy_status;
 
 	if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data)
 	printk(".\n");
 }
 
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	volatile struct fcc_enet_private *fep = dev->priv;
+	volatile struct fcc_enet_private *fep =
+		container_of(work, struct fcc_enet_private,
+			     phy_display_config);
+	struct net_device *dev = fep->dev;
 	uint s = fep->phy_status;
 
 	printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void)
 		cep->phy_id_done = 0;
 		cep->phy_addr = fip->fc_phyaddr;
 		mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
-		INIT_WORK(&cep->phy_relink, mii_display_status, dev);
-		INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+		INIT_WORK(&cep->phy_relink, mii_display_status);
+		INIT_WORK(&cep->phy_display_config, mii_display_config);
+		cep->dev = dev;
 #endif /* CONFIG_USE_MDIO */
 
 		fip++;
@@ -173,6 +173,7 @@ struct fec_enet_private {
 	uint phy_speed;
 	phy_info_t *phy;
 	struct work_struct phy_task;
+	struct net_device *dev;
 
 	uint sequence_done;
 
@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev)
 	printk(".\n");
 }
 
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)priv;
-	struct fec_enet_private *fep = dev->priv;
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, phy_task);
+	struct net_device *dev = fep->dev;
 	volatile uint *s = &(fep->phy_status);
 
 	printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv)
 	fep->sequence_done = 1;
 }
 
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)priv;
-	struct fec_enet_private *fep = dev->priv;
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, phy_task);
+	struct net_device *dev = fep->dev;
 	int duplex;
 
 	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
 {
 	struct fec_enet_private *fep = dev->priv;
 
-	INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+	fep->dev = dev;
+	INIT_WORK(&fep->phy_task, mii_relink);
 	schedule_work(&fep->phy_task);
 }
 
@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev)
 {
 	struct fec_enet_private *fep = dev->priv;
 
-	INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+	fep->dev = dev;
+	INIT_WORK(&fep->phy_task, mii_display_config);
 	schedule_work(&fep->phy_task);
 }
 
@@ -92,8 +92,8 @@ static int appldata_timer_active;
 * Work queue
 */
 static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
 
 
 /*
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data)
 *
 * call data gathering function for each (active) module
 */
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
 {
 	struct list_head *lh;
 	struct appldata_ops *ops;
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans)
 	return -1;
 }
 
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
 		    struct tty_struct *tty, int irq)
 {
 	struct list_head *ele, *next;
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = {
 
 static LIST_HEAD(mc_requests);
 
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
 {
 	struct mconsole_entry *req;
 	unsigned long flags;
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused)
 	}
 }
 
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
 
 static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
 {
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
 		 * same device, since it tests for (dev->flags & IFF_UP). So
 		 * there's no harm in delaying the device shutdown. */
 		schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
 		goto out;
 	}
 	reactivate_fd(lp->fd, UM_ETH_IRQ);
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port)
 DECLARE_MUTEX(ports_sem);
 struct list_head ports = LIST_HEAD_INIT(ports);
 
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
 {
 	struct port_list *port;
 	struct list_head *ele;
@@ -150,7 +150,7 @@ void port_work_proc(void *unused)
 	local_irq_restore(flags);
 }
 
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
 
 static irqreturn_t port_interrupt(int irq, void *data)
 {
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
 
 static void mcheck_check_cpu(void *info)
 {
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
 		do_machine_check(NULL, 0);
 }
 
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
 {
 	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
 	schedule_delayed_work(&mcheck_work, check_interval * HZ);
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	int timeout;
 	unsigned long start_rip;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu = cpu,
 		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
 	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
 	if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	 * thread.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
 {
 	unsigned int cpu;
 	for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
 
 static int __init cpufreq_tsc(void)
 {
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
 				       CPUFREQ_TRANSITION_NOTIFIER))
 		cpufreq_init = 1;
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
 *
 * FIXME! dispatch queue is not a queue at all!
 */
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
 {
-	struct request_queue *q = data;
+	struct as_data *ad = container_of(work, struct as_data, antic_work);
+	struct request_queue *q = ad->q;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q)
 	ad->antic_timer.function = as_antic_timeout;
 	ad->antic_timer.data = (unsigned long)q;
 	init_timer(&ad->antic_timer);
-	INIT_WORK(&ad->antic_work, as_work_handler, q);
+	INIT_WORK(&ad->antic_work, as_work_handler);
 
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
@@ -1840,9 +1840,11 @@ queue_fail:
 	return 1;
 }
 
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	struct cfq_data *cfqd =
+		container_of(work, struct cfq_data, unplug_work);
+	request_queue_t *q = cfqd->queue;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1986,7 +1988,7 @@ static void *cfq_init_queue(request_queue_t *q)
 	cfqd->idle_class_timer.function = cfq_idle_class_timer;
 	cfqd->idle_class_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -34,7 +34,7 @@
 */
 #include <scsi/scsi_cmnd.h>
 
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	if (q->unplug_delay == 0)
 		q->unplug_delay = 1;
 
-	INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 	}
 }
 
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	request_queue_t *q = container_of(work, request_queue_t, unplug_work);
 
 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
 			q->rq.count[READ] + q->rq.count[WRITE]);
@@ -40,9 +40,10 @@ struct cryptomgr_param {
 	char template[CRYPTO_MAX_ALG_NAME];
 };
 
-static void cryptomgr_probe(void *data)
+static void cryptomgr_probe(struct work_struct *work)
 {
-	struct cryptomgr_param *param = data;
+	struct cryptomgr_param *param =
+		container_of(work, struct cryptomgr_param, work);
 	struct crypto_template *tmpl;
 	struct crypto_instance *inst;
 	int err;
@@ -112,7 +113,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
 	param->larval.type = larval->alg.cra_flags;
 	param->larval.mask = larval->mask;
 
-	INIT_WORK(&param->work, cryptomgr_probe, param);
+	INIT_WORK(&param->work, cryptomgr_probe);
 	schedule_work(&param->work);
 
 	return NOTIFY_STOP;
@@ -50,6 +50,7 @@ ACPI_MODULE_NAME("osl")
 struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
 	void *context;
+	struct work_struct work;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
 	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
 }
 
-static void acpi_os_execute_deferred(void *context)
+static void acpi_os_execute_deferred(struct work_struct *work)
 {
-	struct acpi_os_dpc *dpc = NULL;
-
-
-	dpc = (struct acpi_os_dpc *)context;
+	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 	if (!dpc) {
 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
 		return;
@@ -602,7 +600,6 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
-	struct work_struct *task;
 
 	ACPI_FUNCTION_TRACE("os_queue_for_execution");
 
@@ -615,28 +612,22 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 
 	/*
 	 * Allocate/initialize DPC structure.  Note that this memory will be
-	 * freed by the callee.  The kernel handles the tq_struct list in a
+	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
-	 * having a static tq_struct.
-	 * We can save time and code by allocating the DPC and tq_structs
-	 * from the same memory.
+	 * having a static work_struct.
	 */
 
-	dpc =
-	    kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
-		    GFP_ATOMIC);
+	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
 	if (!dpc)
 		return_ACPI_STATUS(AE_NO_MEMORY);
 
 	dpc->function = function;
 	dpc->context = context;
 
-	task = (void *)(dpc + 1);
-	INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
-	if (!queue_work(kacpid_wq, task)) {
+	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+	if (!queue_work(kacpid_wq, &dpc->work)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				  "Call to queue_work() failed.\n"));
 		kfree(dpc);
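The osl.c hunk above is the one-shot variant of the same pattern: rather than kmalloc()ing a bare work_struct behind the payload, the work item is embedded in the allocated context and the handler owns the memory. A sketch under those assumptions (hypothetical names):

	struct dpc_ctx {
		void (*function)(void *);
		void *context;
		struct work_struct work;
	};

	static void dpc_run(struct work_struct *work)
	{
		struct dpc_ctx *dpc = container_of(work, struct dpc_ctx, work);

		dpc->function(dpc->context);
		kfree(dpc);	/* the handler frees the whole allocation */
	}

	static int dpc_queue(void (*fn)(void *), void *ctx)
	{
		struct dpc_ctx *dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);

		if (!dpc)
			return -ENOMEM;
		dpc->function = fn;
		dpc->context = ctx;
		INIT_WORK(&dpc->work, dpc_run);
		schedule_work(&dpc->work);
		return 0;
	}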
@@ -1081,7 +1081,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
- *	@data: data value to pass to workqueue function
+ *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
@@ -1096,7 +1096,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
 *	LOCKING:
 *	Inherited from caller.
 */
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
 {
 	int rc;
@@ -1104,12 +1104,10 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
 		return;
 
-	PREPARE_WORK(&ap->port_task, fn, data);
+	PREPARE_DELAYED_WORK(&ap->port_task, fn);
+	ap->port_task_data = data;
 
-	if (!delay)
-		rc = queue_work(ata_wq, &ap->port_task);
-	else
-		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
 	/* rc == 0 means that another user is using port task */
 	WARN_ON(rc == 0);
@@ -4588,10 +4586,11 @@ fsm_start:
 	return poll_next;
 }
 
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
 {
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, port_task.work);
+	struct ata_queued_cmd *qc = ap->port_task_data;
 	u8 status;
 	int poll_next;
 
@@ -5635,9 +5634,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-	INIT_WORK(&ap->port_task, NULL, NULL);
-	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
-	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+	INIT_DELAYED_WORK(&ap->port_task, NULL);
+	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
 
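Note how ata_pio_task above recovers its port: a delayed_work handler is handed the embedded work_struct, so container_of() goes through the .work member of the delayed_work. A reduced sketch (hypothetical names):

	struct qux_port {
		struct delayed_work task;	/* was a plain work_struct */
		void *task_data;		/* replaces the old context argument */
	};

	static void qux_task(struct work_struct *work)
	{
		/* 'work' is &port->task.work, so step through .work */
		struct qux_port *port =
			container_of(work, struct qux_port, task.work);

		port->task_data = NULL;	/* consume the stashed argument */
	}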
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 	if (ap->pflags & ATA_PFLAG_LOADING)
 		ap->pflags &= ~ATA_PFLAG_LOADING;
 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-		queue_work(ata_aux_wq, &ap->hotplug_task);
+		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
 	if (ap->pflags & ATA_PFLAG_RECOVERED)
 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
@@ -2963,7 +2963,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 
 /**
 *	ata_scsi_hotplug - SCSI part of hotplug
- *	@data: Pointer to ATA port to perform SCSI hotplug on
+ *	@work: Pointer to ATA port to perform SCSI hotplug on
 *
 *	Perform SCSI part of hotplug.  It's executed from a separate
 *	workqueue after EH completes.  This is necessary because SCSI
@@ -2973,9 +2973,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, hotplug_task.work);
 	int i;
 
 	if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3076,7 +3077,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 
 /**
 *	ata_scsi_dev_rescan - initiate scsi_rescan_device()
- *	@data: Pointer to ATA port to perform scsi_rescan_device()
+ *	@work: Pointer to ATA port to perform scsi_rescan_device()
 *
 *	After ATA pass thru (SAT) commands are executed successfully,
 *	libata need to propagate the changes to SCSI layer.  This
@@ -3086,9 +3087,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, scsi_rescan_task);
 	unsigned long flags;
 	unsigned int i;
 
@@ -94,7 +94,7 @@ extern struct scsi_transport_template ata_scsi_transport_template;
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_hotplug(void *data);
+extern void ata_scsi_hotplug(struct work_struct *work);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 			       unsigned int buflen);
 
@@ -124,7 +124,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
			unsigned int (*actor) (struct ata_scsi_args *args,
					u8 *rbuf, unsigned int buflen));
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
-extern void ata_scsi_dev_rescan(void *data);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 
 /* libata-eh.c */
@@ -135,7 +135,7 @@ static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos,
			       int flags);
 static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
			      char *page);
-static void idt77252_softint(void *dev_id);
+static void idt77252_softint(struct work_struct *work);
 
 
 static struct atmdev_ops idt77252_ops =
@@ -2866,9 +2866,10 @@ out:
 }
 
 static void
-idt77252_softint(void *dev_id)
+idt77252_softint(struct work_struct *work)
 {
-	struct idt77252_dev *card = dev_id;
+	struct idt77252_dev *card =
+		container_of(work, struct idt77252_dev, tqueue);
 	u32 stat;
 	int done;
 
@@ -3697,7 +3698,7 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 	card->pcidev = pcidev;
 	sprintf(card->name, "idt77252-%d", card->index);
 
-	INIT_WORK(&card->tqueue, idt77252_softint, (void *)card);
+	INIT_WORK(&card->tqueue, idt77252_softint);
 
 	membase = pci_resource_start(pcidev, 1);
 	srambase = pci_resource_start(pcidev, 2);
@@ -159,7 +159,7 @@ void aoecmd_work(struct aoedev *d);
 void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
 void aoecmd_ata_rsp(struct sk_buff *);
 void aoecmd_cfg_rsp(struct sk_buff *);
-void aoecmd_sleepwork(void *vp);
+void aoecmd_sleepwork(struct work_struct *);
 struct sk_buff *new_skb(ulong);
 
 int aoedev_init(void);
@@ -408,9 +408,9 @@ rexmit_timer(ulong vp)
 /* this function performs work that has been deferred until sleeping is OK
 */
 void
-aoecmd_sleepwork(void *vp)
+aoecmd_sleepwork(struct work_struct *work)
 {
-	struct aoedev *d = (struct aoedev *) vp;
+	struct aoedev *d = container_of(work, struct aoedev, work);
 
 	if (d->flags & DEVFL_GDALLOC)
 		aoeblk_gdalloc(d);
@@ -88,7 +88,7 @@ aoedev_newdev(ulong nframes)
 		kfree(d);
 		return NULL;
 	}
-	INIT_WORK(&d->work, aoecmd_sleepwork, d);
+	INIT_WORK(&d->work, aoecmd_sleepwork);
 	spin_lock_init(&d->lock);
 	init_timer(&d->timer);
 	d->timer.data = (ulong) d;
@@ -992,11 +992,11 @@ static void empty(void)
 {
 }
 
-static DECLARE_WORK(floppy_work, NULL, NULL);
+static DECLARE_WORK(floppy_work, NULL);
 
 static void schedule_bh(void (*handler) (void))
 {
-	PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)handler);
 	schedule_work(&floppy_work);
 }
 
@@ -1008,7 +1008,7 @@ static void cancel_activity(void)
 
 	spin_lock_irqsave(&floppy_lock, flags);
 	do_floppy = NULL;
-	PREPARE_WORK(&floppy_work, (void *)empty, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)empty);
 	del_timer(&fd_timer);
 	spin_unlock_irqrestore(&floppy_lock, flags);
 }
@@ -1868,7 +1868,7 @@ static void show_floppy(void)
 	printk("fdc_busy=%lu\n", fdc_busy);
 	if (do_floppy)
 		printk("do_floppy=%p\n", do_floppy);
-	if (floppy_work.pending)
+	if (work_pending(&floppy_work))
 		printk("floppy_work.func=%p\n", floppy_work.func);
 	if (timer_pending(&fd_timer))
 		printk("fd_timer.function=%p\n", fd_timer.function);
@@ -4498,7 +4498,7 @@ static void floppy_release_irq_and_dma(void)
 		printk("floppy timer still active:%s\n", timeout_message);
 	if (timer_pending(&fd_timer))
 		printk("auxiliary floppy timer still active\n");
-	if (floppy_work.pending)
+	if (work_pending(&floppy_work))
 		printk("work still pending\n");
 #endif
 	old_fdc = fdc;
@@ -352,19 +352,19 @@ static enum action (*phase)(void);
 
 static void run_fsm(void);
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
-static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
 
 static void schedule_fsm(void)
 {
 	if (!nice)
-		schedule_work(&fsm_tq);
+		schedule_delayed_work(&fsm_tq, 0);
 	else
 		schedule_delayed_work(&fsm_tq, nice-1);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
 	run_fsm();
 }
@@ -35,7 +35,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
 static void (* ps_continuation)(void);
 static int (* ps_ready)(void);
@@ -45,7 +45,7 @@ static int ps_nice = 0;
 
 static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
 
-static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
 
 static void ps_set_intr(void (*continuation)(void),
			int (*ready)(void),
@@ -63,14 +63,14 @@ static void ps_set_intr(void (*continuation)(void),
 	if (!ps_tq_active) {
 		ps_tq_active = 1;
 		if (!ps_nice)
-			schedule_work(&ps_tq);
+			schedule_delayed_work(&ps_tq, 0);
 		else
 			schedule_delayed_work(&ps_tq, ps_nice-1);
 	}
 	spin_unlock_irqrestore(&ps_spinlock,flags);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
 	void (*con)(void);
 	unsigned long flags;
@@ -92,7 +92,7 @@ static void ps_tq_int(void *data)
 	}
 	ps_tq_active = 1;
 	if (!ps_nice)
-		schedule_work(&ps_tq);
+		schedule_delayed_work(&ps_tq, 0);
 	else
 		schedule_delayed_work(&ps_tq, ps_nice-1);
 	spin_unlock_irqrestore(&ps_spinlock,flags);
@@ -1244,9 +1244,10 @@ out:
 	return IRQ_RETVAL(handled);
 }
 
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
 {
-	struct carm_host *host = _data;
+	struct carm_host *host =
+		container_of(work, struct carm_host, fsm_task);
 	unsigned long flags;
 	unsigned int state;
 	int rc, i, next_dev;
@@ -1619,7 +1620,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	host->pdev = pdev;
 	host->flags = pci_dac ? FL_DAC : 0;
 	spin_lock_init(&host->lock);
-	INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+	INIT_WORK(&host->fsm_task, carm_fsm_task);
 	init_completion(&host->probe_comp);
 
 	for (i = 0; i < ARRAY_SIZE(host->req); i++)
@@ -376,7 +376,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
			 int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
 static void ub_reset_enter(struct ub_dev *sc, int try);
-static void ub_reset_task(void *arg);
+static void ub_reset_task(struct work_struct *work);
 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
		    struct ub_capacity *ret);
@@ -1558,9 +1558,9 @@ static void ub_reset_enter(struct ub_dev *sc, int try)
 	schedule_work(&sc->reset_work);
 }
 
-static void ub_reset_task(void *arg)
+static void ub_reset_task(struct work_struct *work)
 {
-	struct ub_dev *sc = arg;
+	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
 	unsigned long flags;
 	struct list_head *p;
 	struct ub_lun *lun;
@@ -2179,7 +2179,7 @@ static int ub_probe(struct usb_interface *intf,
 	usb_init_urb(&sc->work_urb);
 	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
 	atomic_set(&sc->poison, 0);
-	INIT_WORK(&sc->reset_work, ub_reset_task, sc);
+	INIT_WORK(&sc->reset_work, ub_reset_task);
 	init_waitqueue_head(&sc->reset_wait);
 
 	init_timer(&sc->work_timer);
@@ -157,9 +157,10 @@ static void bcm203x_complete(struct urb *urb)
 	}
 }
 
-static void bcm203x_work(void *user_data)
+static void bcm203x_work(struct work_struct *work)
 {
-	struct bcm203x_data *data = user_data;
+	struct bcm203x_data *data =
+		container_of(work, struct bcm203x_data, work);
 
 	if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
 		BT_ERR("Can't submit URB");
@@ -246,7 +247,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
 
 	release_firmware(firmware);
 
-	INIT_WORK(&data->work, bcm203x_work, (void *) data);
+	INIT_WORK(&data->work, bcm203x_work);
 
 	usb_set_intfdata(intf, data);
 
@@ -926,9 +926,10 @@ cy_sched_event(struct cyclades_port *info, int event)
 * had to poll every port to see if that port needed servicing.
 */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
 {
-	struct cyclades_port *info = (struct cyclades_port *) private_;
+	struct cyclades_port *info =
+		container_of(work, struct cyclades_port, tqueue);
 	struct tty_struct *tty;
 
 	tty = info->tty;
@@ -5328,7 +5329,7 @@ cy_init(void)
 		info->blocked_open = 0;
 		info->default_threshold = 0;
 		info->default_timeout = 0;
-		INIT_WORK(&info->tqueue, do_softint, info);
+		INIT_WORK(&info->tqueue, do_softint);
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
 		init_waitqueue_head(&info->shutdown_wait);
@@ -5403,7 +5404,7 @@ cy_init(void)
 		info->blocked_open = 0;
 		info->default_threshold = 0;
 		info->default_timeout = 0;
-		INIT_WORK(&info->tqueue, do_softint, info);
+		INIT_WORK(&info->tqueue, do_softint);
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
 		init_waitqueue_head(&info->shutdown_wait);
|
|||
|
||||
|
||||
static void
|
||||
via_dmablit_workqueue(void *data)
|
||||
via_dmablit_workqueue(struct work_struct *work)
|
||||
{
|
||||
drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
|
||||
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
|
||||
drm_device_t *dev = blitq->dev;
|
||||
unsigned long irqsave;
|
||||
drm_via_sg_info_t *cur_sg;
|
||||
|
@ -571,7 +571,7 @@ via_init_dmablit(drm_device_t *dev)
|
|||
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
|
||||
}
|
||||
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
|
||||
INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
|
||||
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
|
||||
init_timer(&blitq->poll_timer);
|
||||
blitq->poll_timer.function = &via_dmablit_timer;
|
||||
blitq->poll_timer.data = (unsigned long) blitq;
|
||||
|
|
|
@@ -200,7 +200,7 @@ static int pc_ioctl(struct tty_struct *, struct file *,
 static int info_ioctl(struct tty_struct *, struct file *,
		      unsigned int, unsigned long);
 static void pc_set_termios(struct tty_struct *, struct termios *);
-static void do_softint(void *);
+static void do_softint(struct work_struct *work);
 static void pc_stop(struct tty_struct *);
 static void pc_start(struct tty_struct *);
 static void pc_throttle(struct tty_struct * tty);
@@ -1505,7 +1505,7 @@ static void post_fep_init(unsigned int crd)
 
 		ch->brdchan = bc;
 		ch->mailbox = gd;
-		INIT_WORK(&ch->tqueue, do_softint, ch);
+		INIT_WORK(&ch->tqueue, do_softint);
 		ch->board = &boards[crd];
 
 		spin_lock_irqsave(&epca_lock, flags);
@@ -2566,9 +2566,9 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
 
 /* --------------------- Begin do_softint ----------------------- */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 { /* Begin do_softint */
-	struct channel *ch = (struct channel *) private_;
+	struct channel *ch = container_of(work, struct channel, tqueue);
 	/* Called in response to a modem change event */
 	if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */
 		struct tty_struct *tty = ch->tty;
@@ -723,9 +723,10 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
 * -------------------------------------------------------------------
 */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-	struct esp_struct *info = (struct esp_struct *) private_;
+	struct esp_struct *info =
+		container_of(work, struct esp_struct, tqueue);
 	struct tty_struct *tty;
 
 	tty = info->tty;
@@ -746,9 +747,10 @@ static void do_softint(void *private_)
 * 	do_serial_hangup() -> tty->hangup() -> esp_hangup()
 *
 */
-static void do_serial_hangup(void *private_)
+static void do_serial_hangup(struct work_struct *work)
 {
-	struct esp_struct *info = (struct esp_struct *) private_;
+	struct esp_struct *info =
+		container_of(work, struct esp_struct, tqueue_hangup);
 	struct tty_struct *tty;
 
 	tty = info->tty;
@@ -2501,8 +2503,8 @@ static int __init espserial_init(void)
 		info->magic = ESP_MAGIC;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
-		INIT_WORK(&info->tqueue, do_softint, info);
-		INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+		INIT_WORK(&info->tqueue, do_softint);
+		INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
 		info->config.rx_timeout = rx_timeout;
 		info->config.flow_on = flow_on;
 		info->config.flow_off = flow_off;
@@ -102,7 +102,7 @@ static void gen_rtc_interrupt(unsigned long arg);
 * Routine to poll RTC seconds field for change as often as possible,
 * after first RTC_UIE use timer to reduce polling
 */
-static void genrtc_troutine(void *data)
+static void genrtc_troutine(struct work_struct *work)
 {
 	unsigned int tmp = get_rtc_ss();
 
@@ -255,7 +255,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit)
 		irq_active = 1;
 		stop_rtc_timers = 0;
 		lostint = 0;
-		INIT_WORK(&genrtc_task, genrtc_troutine, NULL);
+		INIT_WORK(&genrtc_task, genrtc_troutine);
 		oldsecs = get_rtc_ss();
 		init_timer(&timer_task);
 
@@ -69,7 +69,7 @@
 #define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
 
 struct hvsi_struct {
-	struct work_struct writer;
+	struct delayed_work writer;
 	struct work_struct handshaker;
 	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
 	wait_queue_head_t stateq; /* woken when HVSI state changes */
@@ -744,9 +744,10 @@ static int hvsi_handshake(struct hvsi_struct *hp)
 	return 0;
 }
 
-static void hvsi_handshaker(void *arg)
+static void hvsi_handshaker(struct work_struct *work)
 {
-	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+	struct hvsi_struct *hp =
+		container_of(work, struct hvsi_struct, handshaker);
 
 	if (hvsi_handshake(hp) >= 0)
 		return;
@@ -951,9 +952,10 @@ static void hvsi_push(struct hvsi_struct *hp)
 }
 
 /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
-static void hvsi_write_worker(void *arg)
+static void hvsi_write_worker(struct work_struct *work)
 {
-	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+	struct hvsi_struct *hp =
+		container_of(work, struct hvsi_struct, writer.work);
 	unsigned long flags;
 #ifdef DEBUG
 	static long start_j = 0;
@@ -1287,8 +1289,8 @@ static int __init hvsi_console_init(void)
 	}
 
 	hp = &hvsi_ports[hvsi_count];
-	INIT_WORK(&hp->writer, hvsi_write_worker, hp);
-	INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
+	INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+	INIT_WORK(&hp->handshaker, hvsi_handshaker);
 	init_waitqueue_head(&hp->emptyq);
 	init_waitqueue_head(&hp->stateq);
 	spin_lock_init(&hp->lock);
@@ -84,8 +84,8 @@ static void iiSendPendingMail(i2eBordStrPtr);
 static void serviceOutgoingFifo(i2eBordStrPtr);
 
 // Functions defined in ip2.c as part of interrupt handling
-static void do_input(void *);
-static void do_status(void *);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 //***************
 //* Debug Data *
@@ -331,8 +331,8 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
 	pCh->ClosingWaitTime = 30*HZ;
 
 	// Initialize task queue objects
-	INIT_WORK(&pCh->tqueue_input, do_input, pCh);
-	INIT_WORK(&pCh->tqueue_status, do_status, pCh);
+	INIT_WORK(&pCh->tqueue_input, do_input);
+	INIT_WORK(&pCh->tqueue_status, do_status);
 
 #ifdef IP2DEBUG_TRACE
 	pCh->trace = ip2trace;
@@ -1573,7 +1573,7 @@ i2StripFifo(i2eBordStrPtr pB)
 #ifdef USE_IQ
			schedule_work(&pCh->tqueue_input);
 #else
-			do_input(pCh);
+			do_input(&pCh->tqueue_input);
 #endif
 
			// Note we do not need to maintain any flow-control credits at this
@@ -1810,7 +1810,7 @@ i2StripFifo(i2eBordStrPtr pB)
 #ifdef USE_IQ
				schedule_work(&pCh->tqueue_status);
 #else
-				do_status(pCh);
+				do_status(&pCh->tqueue_status);
 #endif
			}
		}
@@ -189,12 +189,12 @@ static int ip2_tiocmset(struct tty_struct *tty, struct file *file,
		unsigned int set, unsigned int clear);
 
 static void set_irq(int, int);
-static void ip2_interrupt_bh(i2eBordStrPtr pB);
+static void ip2_interrupt_bh(struct work_struct *work);
 static irqreturn_t ip2_interrupt(int irq, void *dev_id);
 static void ip2_poll(unsigned long arg);
 static inline void service_all_boards(void);
-static void do_input(void *p);
-static void do_status(void *p);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 static void ip2_wait_until_sent(PTTY,int);
 
@@ -918,7 +918,7 @@ ip2_init_board( int boardnum )
		pCh++;
	}
 ex_exit:
-	INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB);
+	INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
	return;
 
 err_release_region:
@@ -1125,8 +1125,8 @@ service_all_boards(void)
 
 
 /******************************************************************************/
-/* Function:   ip2_interrupt_bh(pB)                                           */
-/* Parameters: pB - pointer to the board structure                            */
+/* Function:   ip2_interrupt_bh(work)                                         */
+/* Parameters: work - pointer to the board structure                          */
 /* Returns:    Nothing                                                        */
 /*                                                                            */
 /* Description:                                                               */
@@ -1135,8 +1135,9 @@ service_all_boards(void)
 /*                                                                            */
 /******************************************************************************/
 static void
-ip2_interrupt_bh(i2eBordStrPtr pB)
+ip2_interrupt_bh(struct work_struct *work)
 {
+	i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
 //	pB better well be set or we have a problem!  We can only get
 //	here from the IMMEDIATE queue.  Here, we process the boards.
 //	Checking pB doesn't cost much and it saves us from the sanity checkers.
@@ -1245,9 +1246,9 @@ ip2_poll(unsigned long arg)
	ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
 }
 
-static void do_input(void *p)
+static void do_input(struct work_struct *work)
 {
-	i2ChanStrPtr pCh = p;
+	i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
	unsigned long flags;
 
	ip2trace(CHANN, ITRC_INPUT, 21, 0 );
@@ -1279,9 +1280,9 @@ static inline void isig(int sig, struct tty_struct *tty, int flush)
	}
 }
 
-static void do_status(void *p)
+static void do_status(struct work_struct *work)
 {
-	i2ChanStrPtr pCh = p;
+	i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
	int status;
 
	status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
@@ -530,9 +530,9 @@ sched_again:
/* Interrupt handlers */


static void isicom_bottomhalf(void *data)
static void isicom_bottomhalf(struct work_struct *work)
{
struct isi_port *port = (struct isi_port *) data;
struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
struct tty_struct *tty = port->tty;

if (!tty)
@@ -1474,9 +1474,9 @@ static void isicom_start(struct tty_struct *tty)
}

/* hangup et all */
static void do_isicom_hangup(void *data)
static void do_isicom_hangup(struct work_struct *work)
{
struct isi_port *port = data;
struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
struct tty_struct *tty;

tty = port->tty;
@@ -1966,8 +1966,8 @@ static int __devinit isicom_setup(void)
port->channel = channel;
port->close_delay = 50 * HZ/100;
port->closing_wait = 3000 * HZ/100;
INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
INIT_WORK(&port->hangup_tq, do_isicom_hangup);
INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
port->status = 0;
init_waitqueue_head(&port->open_wait);
init_waitqueue_head(&port->close_wait);

@@ -222,7 +222,7 @@ static struct semaphore moxaBuffSem;
/*
* static functions:
*/
static void do_moxa_softint(void *);
static void do_moxa_softint(struct work_struct *);
static int moxa_open(struct tty_struct *, struct file *);
static void moxa_close(struct tty_struct *, struct file *);
static int moxa_write(struct tty_struct *, const unsigned char *, int);
@@ -363,7 +363,7 @@ static int __init moxa_init(void)
for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
ch->type = PORT_16550A;
ch->port = i;
INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
INIT_WORK(&ch->tqueue, do_moxa_softint);
ch->tty = NULL;
ch->close_delay = 5 * HZ / 10;
ch->closing_wait = 30 * HZ;
@@ -509,9 +509,9 @@ static void __exit moxa_exit(void)
module_init(moxa_init);
module_exit(moxa_exit);

static void do_moxa_softint(void *private_)
static void do_moxa_softint(struct work_struct *work)
{
struct moxa_str *ch = (struct moxa_str *) private_;
struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
struct tty_struct *tty;

if (ch && (tty = ch->tty)) {

@@ -389,7 +389,7 @@ static int mxser_init(void);
/* static void mxser_poll(unsigned long); */
static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
static void mxser_do_softint(void *);
static void mxser_do_softint(struct work_struct *);
static int mxser_open(struct tty_struct *, struct file *);
static void mxser_close(struct tty_struct *, struct file *);
static int mxser_write(struct tty_struct *, const unsigned char *, int);
@@ -590,7 +590,7 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
info->custom_divisor = hwconf->baud_base[i] * 16;
info->close_delay = 5 * HZ / 10;
info->closing_wait = 30 * HZ;
INIT_WORK(&info->tqueue, mxser_do_softint, info);
INIT_WORK(&info->tqueue, mxser_do_softint);
info->normal_termios = mxvar_sdriver->init_termios;
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
@@ -917,9 +917,10 @@ static int mxser_init(void)
return 0;
}

static void mxser_do_softint(void *private_)
static void mxser_do_softint(struct work_struct *work)
{
struct mxser_struct *info = private_;
struct mxser_struct *info =
container_of(work, struct mxser_struct, tqueue);
struct tty_struct *tty;

tty = info->tty;

@@ -421,7 +421,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id);
/*
* Bottom half interrupt handlers
*/
static void bh_handler(void* Context);
static void bh_handler(struct work_struct *work);
static void bh_transmit(MGSLPC_INFO *info);
static void bh_status(MGSLPC_INFO *info);

@@ -547,7 +547,7 @@ static int mgslpc_probe(struct pcmcia_device *link)

memset(info, 0, sizeof(MGSLPC_INFO));
info->magic = MGSLPC_MAGIC;
INIT_WORK(&info->task, bh_handler, info);
INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
@@ -835,9 +835,9 @@ static int bh_action(MGSLPC_INFO *info)
return rc;
}

static void bh_handler(void* Context)
static void bh_handler(struct work_struct *work)
{
MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
int action;

if (!info)

@@ -1422,9 +1422,9 @@ static struct keydata {

static unsigned int ip_cnt;

static void rekey_seq_generator(void *private_);
static void rekey_seq_generator(struct work_struct *work);

static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);

/*
* Lock avoidance:
@@ -1438,7 +1438,7 @@ static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
* happen, and even if that happens only a not perfectly compliant
* ISN is generated, nothing fatal.
*/
static void rekey_seq_generator(void *private_)
static void rekey_seq_generator(struct work_struct *work)
{
struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];

@@ -765,7 +765,7 @@ static void sonypi_setbluetoothpower(u8 state)
sonypi_device.bluetooth_power = state;
}

static void input_keyrelease(void *data)
static void input_keyrelease(struct work_struct *work)
{
struct sonypi_keypress kp;

@@ -1412,7 +1412,7 @@ static int __devinit sonypi_probe(struct platform_device *dev)
goto err_inpdev_unregister;
}

INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
INIT_WORK(&sonypi_device.input_work, input_keyrelease);
}

sonypi_enable(0);

@@ -2261,9 +2261,10 @@ static void sx_start(struct tty_struct * tty)
* do_sx_hangup() -> tty->hangup() -> sx_hangup()
*
*/
static void do_sx_hangup(void *private_)
static void do_sx_hangup(struct work_struct *work)
{
struct specialix_port *port = (struct specialix_port *) private_;
struct specialix_port *port =
container_of(work, struct specialix_port, tqueue_hangup);
struct tty_struct *tty;

func_enter();
@@ -2336,9 +2337,10 @@ static void sx_set_termios(struct tty_struct * tty, struct termios * old_termios
}


static void do_softint(void *private_)
static void do_softint(struct work_struct *work)
{
struct specialix_port *port = (struct specialix_port *) private_;
struct specialix_port *port =
container_of(work, struct specialix_port, tqueue);
struct tty_struct *tty;

func_enter();
@@ -2411,8 +2413,8 @@ static int sx_init_drivers(void)
memset(sx_port, 0, sizeof(sx_port));
for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
sx_port[i].magic = SPECIALIX_MAGIC;
INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]);
INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]);
INIT_WORK(&sx_port[i].tqueue, do_softint);
INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
sx_port[i].close_delay = 50 * HZ/100;
sx_port[i].closing_wait = 3000 * HZ/100;
init_waitqueue_head(&sx_port[i].open_wait);

@@ -802,7 +802,7 @@ static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, u
/*
* Bottom half interrupt handlers
*/
static void mgsl_bh_handler(void* Context);
static void mgsl_bh_handler(struct work_struct *work);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);
@@ -1071,9 +1071,10 @@ static int mgsl_bh_action(struct mgsl_struct *info)
/*
* Perform bottom half processing of work items queued by ISR.
*/
static void mgsl_bh_handler(void* Context)
static void mgsl_bh_handler(struct work_struct *work)
{
struct mgsl_struct *info = (struct mgsl_struct*)Context;
struct mgsl_struct *info =
container_of(work, struct mgsl_struct, task);
int action;

if (!info)
@@ -4337,7 +4338,7 @@ static struct mgsl_struct* mgsl_allocate_device(void)
} else {
memset(info, 0, sizeof(struct mgsl_struct));
info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, mgsl_bh_handler, info);
INIT_WORK(&info->task, mgsl_bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;

@@ -485,7 +485,7 @@ static void enable_loopback(struct slgt_info *info);
static void set_rate(struct slgt_info *info, u32 data_rate);

static int bh_action(struct slgt_info *info);
static void bh_handler(void* context);
static void bh_handler(struct work_struct *work);
static void bh_transmit(struct slgt_info *info);
static void isr_serial(struct slgt_info *info);
static void isr_rdma(struct slgt_info *info);
@@ -1878,9 +1878,9 @@ static int bh_action(struct slgt_info *info)
/*
* perform bottom half processing
*/
static void bh_handler(void* context)
static void bh_handler(struct work_struct *work)
{
struct slgt_info *info = context;
struct slgt_info *info = container_of(work, struct slgt_info, task);
int action;

if (!info)
@@ -3326,7 +3326,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
} else {
memset(info, 0, sizeof(struct slgt_info));
info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, bh_handler, info);
INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->raw_rx_size = DMABUFSIZE;
info->close_delay = 5*HZ/10;
@@ -4799,6 +4799,6 @@ static void rx_timeout(unsigned long context)
spin_lock_irqsave(&info->lock, flags);
info->pending_bh |= BH_RECEIVE;
spin_unlock_irqrestore(&info->lock, flags);
bh_handler(info);
bh_handler(&info->task);
}

@@ -602,7 +602,7 @@ static void enable_loopback(SLMP_INFO *info, int enable);
static void set_rate(SLMP_INFO *info, u32 data_rate);

static int bh_action(SLMP_INFO *info);
static void bh_handler(void* Context);
static void bh_handler(struct work_struct *work);
static void bh_receive(SLMP_INFO *info);
static void bh_transmit(SLMP_INFO *info);
static void bh_status(SLMP_INFO *info);
@@ -2063,9 +2063,9 @@ int bh_action(SLMP_INFO *info)

/* Perform bottom half processing of work items queued by ISR.
*/
void bh_handler(void* Context)
void bh_handler(struct work_struct *work)
{
SLMP_INFO *info = (SLMP_INFO*)Context;
SLMP_INFO *info = container_of(work, SLMP_INFO, task);
int action;

if (!info)
@@ -3805,7 +3805,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
} else {
memset(info, 0, sizeof(SLMP_INFO));
info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, bh_handler, info);
INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;

@@ -219,13 +219,13 @@ static struct sysrq_key_op sysrq_term_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};

static void moom_callback(void *ignored)
static void moom_callback(struct work_struct *ignored)
{
out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
GFP_KERNEL, 0);
}

static DECLARE_WORK(moom_work, moom_callback, NULL);
static DECLARE_WORK(moom_work, moom_callback);

static void sysrq_handle_moom(int key, struct tty_struct *tty)
{

@@ -325,9 +325,9 @@ static void user_reader_timeout(unsigned long ptr)
schedule_work(&chip->work);
}

static void timeout_work(void *ptr)
static void timeout_work(struct work_struct *work)
{
struct tpm_chip *chip = ptr;
struct tpm_chip *chip = container_of(work, struct tpm_chip, work);

down(&chip->buffer_mutex);
atomic_set(&chip->data_pending, 0);
@@ -1105,7 +1105,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
init_MUTEX(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);

INIT_WORK(&chip->work, timeout_work, chip);
INIT_WORK(&chip->work, timeout_work);

init_timer(&chip->user_read_timer);
chip->user_read_timer.function = user_reader_timeout;

@@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);

/**
* do_tty_hangup - actual handler for hangup events
* @data: tty device
* @work: tty device
*
* This can be called by the "eventd" kernel thread. That is process
* synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
* tasklist_lock to walk task list for hangup event
*
*/
static void do_tty_hangup(void *data)
static void do_tty_hangup(struct work_struct *work)
{
struct tty_struct *tty = (struct tty_struct *) data;
struct tty_struct *tty =
container_of(work, struct tty_struct, hangup_work);
struct file * cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
@@ -1433,7 +1434,7 @@ void tty_vhangup(struct tty_struct * tty)

printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
#endif
do_tty_hangup((void *) tty);
do_tty_hangup(&tty->hangup_work);
}
EXPORT_SYMBOL(tty_vhangup);

@@ -3304,12 +3305,13 @@ int tty_ioctl(struct inode * inode, struct file * file,
* Nasty bug: do_SAK is being called in interrupt context. This can
* deadlock. We punt it up to process context. AKPM - 16Mar2001
*/
static void __do_SAK(void *arg)
static void __do_SAK(struct work_struct *work)
{
struct tty_struct *tty =
container_of(work, struct tty_struct, SAK_work);
#ifdef TTY_SOFT_SAK
tty_hangup(tty);
#else
struct tty_struct *tty = arg;
struct task_struct *g, *p;
int session;
int i;
@@ -3388,7 +3390,7 @@ void do_SAK(struct tty_struct *tty)
{
if (!tty)
return;
PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
PREPARE_WORK(&tty->SAK_work, __do_SAK);
schedule_work(&tty->SAK_work);
}

@@ -3396,7 +3398,7 @@ EXPORT_SYMBOL(do_SAK);

/**
* flush_to_ldisc
* @private_: tty structure passed from work queue.
* @work: tty structure passed from work queue.
*
* This routine is called out of the software interrupt to flush data
* from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@ EXPORT_SYMBOL(do_SAK);
* receive_buf method is single threaded for each tty instance.
*/

static void flush_to_ldisc(void *private_)
static void flush_to_ldisc(struct work_struct *work)
{
struct tty_struct *tty = (struct tty_struct *) private_;
struct tty_struct *tty =
container_of(work, struct tty_struct, buf.work.work);
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@ void tty_flip_buffer_push(struct tty_struct *tty)
spin_unlock_irqrestore(&tty->buf.lock, flags);

if (tty->low_latency)
flush_to_ldisc((void *) tty);
flush_to_ldisc(&tty->buf.work.work);
else
schedule_delayed_work(&tty->buf.work, 1);
}
@@ -3580,17 +3583,17 @@ static void initialize_tty_struct(struct tty_struct *tty)
tty->overrun_time = jiffies;
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
init_MUTEX(&tty->buf.pty_sem);
mutex_init(&tty->termios_mutex);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
INIT_WORK(&tty->hangup_work, do_tty_hangup);
mutex_init(&tty->atomic_read_lock);
mutex_init(&tty->atomic_write_lock);
spin_lock_init(&tty->read_lock);
INIT_LIST_HEAD(&tty->tty_files);
INIT_WORK(&tty->SAK_work, NULL, NULL);
INIT_WORK(&tty->SAK_work, NULL);
}

/*

@@ -155,7 +155,7 @@ static void con_flush_chars(struct tty_struct *tty);
static void set_vesa_blanking(char __user *p);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(void *ignored);
static void console_callback(struct work_struct *ignored);
static void blank_screen_t(unsigned long dummy);
static void set_palette(struct vc_data *vc);

@@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
static int blankinterval = 10*60*HZ;
static int vesa_off_interval;

static DECLARE_WORK(console_work, console_callback, NULL);
static DECLARE_WORK(console_work, console_callback);

/*
* fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@ out:
* with other console code and prevention of re-entrancy is
* ensured with console_sem.
*/
static void console_callback(void *ignored)
static void console_callback(struct work_struct *ignored)
{
acquire_console_sem();

@@ -31,9 +31,11 @@
#include <linux/connector.h>
#include <linux/delay.h>

void cn_queue_wrapper(void *data)
void cn_queue_wrapper(struct work_struct *work)
{
struct cn_callback_data *d = data;
struct cn_callback_entry *cbq =
container_of(work, struct cn_callback_entry, work.work);
struct cn_callback_data *d = &cbq->data;

d->callback(d->callback_priv);

@@ -57,7 +59,7 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc
memcpy(&cbq->id.id, id, sizeof(struct cb_id));
cbq->data.callback = callback;

INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
return cbq;
}

@@ -135,40 +135,39 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
if (likely(!test_bit(0, &__cbq->work.pending) &&
if (likely(!test_bit(WORK_STRUCT_PENDING,
&__cbq->work.work.management) &&
__cbq->data.ddata == NULL)) {
__cbq->data.callback_priv = msg;

__cbq->data.ddata = data;
__cbq->data.destruct_data = destruct_data;

if (queue_work(dev->cbdev->cn_queue,
&__cbq->work))
if (queue_delayed_work(
dev->cbdev->cn_queue,
&__cbq->work, 0))
err = 0;
} else {
struct work_struct *w;
struct cn_callback_data *d;

w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
if (w) {
d = (struct cn_callback_data *)(w+1);

__cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
if (__cbq) {
d = &__cbq->data;
d->callback_priv = msg;
d->callback = __cbq->data.callback;
d->ddata = data;
d->destruct_data = destruct_data;
d->free = w;
d->free = __cbq;

INIT_LIST_HEAD(&w->entry);
w->pending = 0;
w->func = &cn_queue_wrapper;
w->data = d;
init_timer(&w->timer);
INIT_DELAYED_WORK(&__cbq->work,
&cn_queue_wrapper);

if (queue_work(dev->cbdev->cn_queue, w))
if (queue_delayed_work(
dev->cbdev->cn_queue,
&__cbq->work, 0))
err = 0;
else {
kfree(w);
kfree(__cbq);
err = -EINVAL;
}
} else

@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static void handle_update(void *data);
static void handle_update(struct work_struct *work);

/**
* Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
mutex_init(&policy->lock);
mutex_lock(&policy->lock);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
INIT_WORK(&policy->update, handle_update);

/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
}


static void handle_update(void *data)
static void handle_update(struct work_struct *work)
{
unsigned int cpu = (unsigned int)(long)data;
struct cpufreq_policy *policy =
container_of(work, struct cpufreq_policy, update);
unsigned int cpu = policy->cpu;
dprintk("handle_update for cpu %u called\n", cpu);
cpufreq_update_policy(cpu);
}

@@ -59,7 +59,7 @@ static unsigned int def_sampling_rate;
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)

static void do_dbs_timer(void *data);
static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
@@ -82,7 +82,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
* is recursive for the same process. -Venki
*/
static DEFINE_MUTEX (dbs_mutex);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);

struct dbs_tuners {
unsigned int sampling_rate;
@@ -420,7 +420,7 @@ static void dbs_check_cpu(int cpu)
}
}

static void do_dbs_timer(void *data)
static void do_dbs_timer(struct work_struct *work)
{
int i;
lock_cpu_hotplug();
@@ -435,7 +435,6 @@ static void do_dbs_timer(void *data)

static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;

@@ -47,13 +47,17 @@ static unsigned int def_sampling_rate;
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)

static void do_dbs_timer(void *data);
static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
cputime64_t prev_cpu_wall;
struct cpufreq_policy *cur_policy;
struct work_struct work;
struct delayed_work work;
enum dbs_sample sample_type;
unsigned int enable;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
@@ -407,30 +411,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
}
}

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

static void do_dbs_timer(void *data)
static void do_dbs_timer(struct work_struct *work)
{
unsigned int cpu = smp_processor_id();
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
enum dbs_sample sample_type = dbs_info->sample_type;
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

/* Permit rescheduling of this work item */
work_release(work);

delay -= jiffies % delay;

if (!dbs_info->enable)
return;
/* Common NORMAL_SAMPLE setup */
INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
if (!dbs_tuners_ins.powersave_bias ||
(unsigned long) data == DBS_NORMAL_SAMPLE) {
sample_type == DBS_NORMAL_SAMPLE) {
lock_cpu_hotplug();
dbs_check_cpu(dbs_info);
unlock_cpu_hotplug();
if (dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
INIT_WORK(&dbs_info->work, do_dbs_timer,
(void *)DBS_SUB_SAMPLE);
dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
}
} else {
@@ -449,7 +454,8 @@ static inline void dbs_timer_init(unsigned int cpu)
delay -= jiffies % delay;

ondemand_powersave_bias_init();
INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}

|
@ -140,12 +140,14 @@ ulong ds1374_get_rtc_time(void)
|
|||
return t1;
|
||||
}
|
||||
|
||||
static void ds1374_set_work(void *arg)
|
||||
static ulong new_time;
|
||||
|
||||
static void ds1374_set_work(struct work_struct *work)
|
||||
{
|
||||
ulong t1, t2;
|
||||
int limit = 10; /* arbitrary retry limit */
|
||||
|
||||
t1 = *(ulong *) arg;
|
||||
t1 = new_time;
|
||||
|
||||
mutex_lock(&ds1374_mutex);
|
||||
|
||||
|
@ -167,11 +169,9 @@ static void ds1374_set_work(void *arg)
|
|||
"can't confirm time set from rtc chip\n");
|
||||
}
|
||||
|
||||
static ulong new_time;
|
||||
|
||||
static struct workqueue_struct *ds1374_workqueue;
|
||||
|
||||
static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);
|
||||
static DECLARE_WORK(ds1374_work, ds1374_set_work);
|
||||
|
||||
int ds1374_set_rtc_time(ulong nowtime)
|
||||
{
|
||||
|
@ -180,7 +180,7 @@ int ds1374_set_rtc_time(ulong nowtime)
|
|||
if (in_interrupt())
|
||||
queue_work(ds1374_workqueue, &ds1374_work);
|
||||
else
|
||||
ds1374_set_work(&new_time);
|
||||
ds1374_set_work(NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -31,9 +31,10 @@
#include "config_roms.h"


static void delayed_reset_bus(void * __reset_info)
static void delayed_reset_bus(struct work_struct *work)
{
struct hpsb_host *host = (struct hpsb_host*)__reset_info;
struct hpsb_host *host =
container_of(work, struct hpsb_host, delayed_reset.work);
int generation = host->csr.generation + 1;

/* The generation field rolls over to 2 rather than 0 per IEEE
@@ -145,7 +146,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,

atomic_set(&h->generation, 0);

INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);

init_timer(&h->timeout);
h->timeout.data = (unsigned long) h;
@@ -234,7 +235,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
* Config ROM in the near future. */
reset_delay = HZ;

PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
schedule_delayed_work(&host->delayed_reset, reset_delay);

return 0;

@@ -62,7 +62,7 @@ struct hpsb_host {
struct class_device class_dev;

int update_config_rom;
struct work_struct delayed_reset;
struct delayed_work delayed_reset;
unsigned int config_roms;

struct list_head addr_space;

@@ -493,20 +493,25 @@ static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id,
scsi_unblock_requests(scsi_id->scsi_host);
}

static void sbp2util_write_orb_pointer(void *p)
static void sbp2util_write_orb_pointer(struct work_struct *work)
{
struct scsi_id_instance_data *scsi_id =
container_of(work, struct scsi_id_instance_data,
protocol_work.work);
quadlet_t data[2];

data[0] = ORB_SET_NODE_ID(
((struct scsi_id_instance_data *)p)->hi->host->node_id);
data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
data[1] = scsi_id->last_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
}

static void sbp2util_write_doorbell(void *p)
static void sbp2util_write_doorbell(struct work_struct *work)
{
sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
struct scsi_id_instance_data *scsi_id =
container_of(work, struct scsi_id_instance_data,
protocol_work.work);
sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
}

/*
@@ -843,7 +848,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
INIT_LIST_HEAD(&scsi_id->scsi_list);
spin_lock_init(&scsi_id->sbp2_command_orb_lock);
atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
INIT_WORK(&scsi_id->protocol_work, NULL, NULL);
INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);

ud->device.driver_data = scsi_id;

@@ -2047,11 +2052,10 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
* We do not accept new commands until the job is over.
*/
scsi_block_requests(scsi_id->scsi_host);
PREPARE_WORK(&scsi_id->protocol_work,
PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
last_orb ? sbp2util_write_doorbell:
sbp2util_write_orb_pointer,
scsi_id);
schedule_work(&scsi_id->protocol_work);
sbp2util_write_orb_pointer);
schedule_delayed_work(&scsi_id->protocol_work, 0);
}
}

@@ -348,7 +348,7 @@ struct scsi_id_instance_data {
unsigned workarounds;

atomic_t state;
struct work_struct protocol_work;
struct delayed_work protocol_work;
};

/* For use in scsi_id_instance_data.state */

@@ -55,11 +55,11 @@ struct addr_req {
int status;
};

static void process_req(void *data);
static void process_req(struct work_struct *work);

static DEFINE_MUTEX(lock);
static LIST_HEAD(req_list);
static DECLARE_WORK(work, process_req, NULL);
static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;

void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -215,7 +215,7 @@ out:
return ret;
}

static void process_req(void *data)
static void process_req(struct work_struct *work)
{
struct addr_req *req, *temp_req;
struct sockaddr_in *src_in, *dst_in;

@@ -285,9 +285,10 @@ err:
kfree(tprops);
}

static void ib_cache_task(void *work_ptr)
static void ib_cache_task(struct work_struct *_work)
{
struct ib_update_work *work = work_ptr;
struct ib_update_work *work =
container_of(_work, struct ib_update_work, work);

ib_cache_update(work->device, work->port_num);
kfree(work);
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
event->event == IB_EVENT_CLIENT_REREGISTER) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (work) {
INIT_WORK(&work->work, ib_cache_task, work);
INIT_WORK(&work->work, ib_cache_task);
work->device = event->device;
work->port_num = event->element.port_num;
schedule_work(&work->work);

@@ -101,7 +101,7 @@ struct cm_av {
};

struct cm_work {
struct work_struct work;
struct delayed_work work;
struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
@@ -161,7 +161,7 @@ struct cm_id_private {
atomic_t work_count;
};

static void cm_work_handler(void *data);
static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
@@ -668,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
return ERR_PTR(-ENOMEM);

timewait_info->work.local_id = local_id;
INIT_WORK(&timewait_info->work.work, cm_work_handler,
&timewait_info->work);
INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
return timewait_info;
}
@@ -2995,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
}
}

static void cm_work_handler(void *data)
static void cm_work_handler(struct work_struct *_work)
{
struct cm_work *work = data;
struct cm_work *work = container_of(_work, struct cm_work, work.work);
int ret;

switch (work->cm_event.event) {
@@ -3087,12 +3086,12 @@ static int cm_establish(struct ib_cm_id *cm_id)
* we need to find the cm_id once we're in the context of the
* worker thread, rather than holding a reference on it.
*/
INIT_WORK(&work->work, cm_work_handler, work);
INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->local_id = cm_id->local_id;
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
queue_work(cm.wq, &work->work);
queue_delayed_work(cm.wq, &work->work, 0);
out:
return ret;
}
@@ -3191,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
return;
}

INIT_WORK(&work->work, cm_work_handler, work);
INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = (struct cm_port *)mad_agent->context;
queue_work(cm.wq, &work->work);
queue_delayed_work(cm.wq, &work->work, 0);
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,

@@ -1340,9 +1340,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(void *data)
static void cma_work_handler(struct work_struct *_work)
{
struct cma_work *work = data;
struct cma_work *work = container_of(_work, struct cma_work, work);
struct rdma_id_private *id_priv = work->id;
int destroy = 0;

@@ -1373,7 +1373,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
return -ENOMEM;

work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler, work);
INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1430,7 +1430,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
return -ENOMEM;

work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler, work);
INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1583,7 +1583,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
}

work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler, work);
INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ADDR_QUERY;
work->new_state = CMA_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

@@ -828,9 +828,9 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
* thread asleep on the destroy_comp list vs. an object destroyed
* here synchronously when the last reference is removed.
*/
static void cm_work_handler(void *arg)
static void cm_work_handler(struct work_struct *_work)
{
struct iwcm_work *work = arg;
struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
struct iw_cm_event levent;
struct iwcm_id_private *cm_id_priv = work->cm_id;
unsigned long flags;
@@ -900,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
goto out;
}

INIT_WORK(&work->work, cm_work_handler, work);
INIT_WORK(&work->work, cm_work_handler);
work->cm_id = cm_id_priv;
work->event = *iw_event;

@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent(
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(void *data);
static void local_completions(void *data);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
struct ib_mad_agent_private *agent_priv,
u8 mgmt_class);
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
INIT_LIST_HEAD(&mad_agent_priv->done_list);
INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
INIT_LIST_HEAD(&mad_agent_priv->local_list);
INIT_WORK(&mad_agent_priv->local_work, local_completions,
mad_agent_priv);
INIT_WORK(&mad_agent_priv->local_work, local_completions);
atomic_set(&mad_agent_priv->refcount, 1);
init_completion(&mad_agent_priv->comp);

@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
/*
* IB MAD completion callback
*/
static void ib_mad_completion_handler(void *data)
static void ib_mad_completion_handler(struct work_struct *work)
{
struct ib_mad_port_private *port_priv;
struct ib_wc wc;

port_priv = (struct ib_mad_port_private *)data;
port_priv = container_of(work, struct ib_mad_port_private, work);
ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
}
EXPORT_SYMBOL(ib_cancel_mad);

static void local_completions(void *data)
static void local_completions(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@ static void local_completions(void *data)
struct ib_wc wc;
struct ib_mad_send_wc mad_send_wc;

mad_agent_priv = (struct ib_mad_agent_private *)data;
mad_agent_priv =
container_of(work, struct ib_mad_agent_private, local_work);

spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
return ret;
}

static void timeout_sends(void *data)
static void timeout_sends(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags, delay;

mad_agent_priv = (struct ib_mad_agent_private *)data;
mad_agent_priv = container_of(work, struct ib_mad_agent_private,
timed_work.work);
mad_send_wc.vendor_err = 0;

spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
ret = -ENOMEM;
goto error8;
}
INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
INIT_WORK(&port_priv->work, ib_mad_completion_handler);

spin_lock_irqsave(&ib_mad_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_mad_port_list);

@@ -102,7 +102,7 @@ struct ib_mad_agent_private {
struct list_head send_list;
struct list_head wait_list;
struct list_head done_list;
struct work_struct timed_work;
struct delayed_work timed_work;
unsigned long timeout;
struct list_head local_list;
struct work_struct local_work;

@@ -45,8 +45,8 @@ enum rmpp_state {
struct mad_rmpp_recv {
struct ib_mad_agent_private *agent;
struct list_head list;
struct work_struct timeout_work;
struct work_struct cleanup_work;
struct delayed_work timeout_work;
struct delayed_work cleanup_work;
struct completion comp;
enum rmpp_state state;
spinlock_t lock;
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent,
}
}

static void recv_timeout_handler(void *data)
static void recv_timeout_handler(struct work_struct *work)
{
struct mad_rmpp_recv *rmpp_recv = data;
struct mad_rmpp_recv *rmpp_recv =
container_of(work, struct mad_rmpp_recv, timeout_work.work);
struct ib_mad_recv_wc *rmpp_wc;
unsigned long flags;

@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data)
ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(void *data)
static void recv_cleanup_handler(struct work_struct *work)
{
struct mad_rmpp_recv *rmpp_recv = data;
struct mad_rmpp_recv *rmpp_recv =
container_of(work, struct mad_rmpp_recv, cleanup_work.work);
unsigned long flags;

spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,

rmpp_recv->agent = agent;
init_completion(&rmpp_recv->comp);
INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
spin_lock_init(&rmpp_recv->lock);
rmpp_recv->state = RMPP_STATE_ACTIVE;
atomic_set(&rmpp_recv->refcount, 1);

@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref)
kfree(sm_ah);
}

static void update_sm_ah(void *port_ptr)
static void update_sm_ah(struct work_struct *work)
{
struct ib_sa_port *port = port_ptr;
struct ib_sa_port *port =
container_of(work, struct ib_sa_port, update_task);
struct ib_sa_sm_ah *new_ah, *old_ah;
struct ib_port_attr port_attr;
struct ib_ah_attr ah_attr;
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device)
if (IS_ERR(sa_dev->port[i].agent))
goto err;

INIT_WORK(&sa_dev->port[i].update_task,
update_sm_ah, &sa_dev->port[i]);
INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
}

ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
goto err;

for (i = 0; i <= e - s; ++i)
update_sm_ah(&sa_dev->port[i]);
update_sm_ah(&sa_dev->port[i].update_task);

return;

@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
up_write(&current->mm->mmap_sem);
}

static void ib_umem_account(void *work_ptr)
static void ib_umem_account(struct work_struct *_work)
{
struct ib_umem_account_work *work = work_ptr;
struct ib_umem_account_work *work =
container_of(_work, struct ib_umem_account_work, work);

down_write(&work->mm->mmap_sem);
work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
return;
}

INIT_WORK(&work->work, ib_umem_account, work);
INIT_WORK(&work->work, ib_umem_account);
work->mm = mm;
work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;

@@ -214,9 +214,10 @@ struct ipath_user_pages_work {
unsigned long num_pages;
};

static void user_pages_account(void *ptr)
static void user_pages_account(struct work_struct *_work)
{
struct ipath_user_pages_work *work = ptr;
struct ipath_user_pages_work *work =
container_of(_work, struct ipath_user_pages_work, work);

down_write(&work->mm->mmap_sem);
work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)

goto bail;

INIT_WORK(&work->work, user_pages_account, work);
INIT_WORK(&work->work, user_pages_account);
work->mm = mm;
work->num_pages = num_pages;

@@ -57,7 +57,7 @@ static int catas_reset_disable;
module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");

static void catas_reset(void *work_ptr)
static void catas_reset(struct work_struct *work)
{
struct mthca_dev *dev, *tmpdev;
LIST_HEAD(tlist);
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)

int __init mthca_catas_init(void)
{
INIT_WORK(&catas_work, catas_reset, NULL);
INIT_WORK(&catas_work, catas_reset);

catas_wq = create_singlethread_workqueue("mthca_catas");
if (!catas_wq)

@@ -136,11 +136,11 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;

struct work_struct pkey_task;
struct work_struct mcast_task;
struct delayed_work pkey_task;
struct delayed_work mcast_task;
struct work_struct flush_task;
struct work_struct restart_task;
struct work_struct ah_reap_task;
struct delayed_work ah_reap_task;

struct ib_device *ca;
u8 port;
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev);

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_ah *address, u32 qpn);
void ipoib_reap_ah(void *dev_ptr);
void ipoib_reap_ah(struct work_struct *work);

void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_ib_dev_flush(void *dev);
void ipoib_ib_dev_flush(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);

int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev);

void ipoib_mcast_join_task(void *dev_ptr);
void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);

void ipoib_mcast_restart_task(void *dev_ptr);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
int ipoib_mcast_stop_thread(struct net_device *dev, int flush);

@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler,
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);

void ipoib_pkey_poll(void *dev);
void ipoib_pkey_poll(struct work_struct *work);
int ipoib_pkey_dev_delay_open(struct net_device *dev);

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

@@ -400,10 +400,11 @@ static void __ipoib_reap_ah(struct net_device *dev)
spin_unlock_irq(&priv->tx_lock);
}

void ipoib_reap_ah(void *dev_ptr)
void ipoib_reap_ah(struct work_struct *work)
{
struct net_device *dev = dev_ptr;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
struct net_device *dev = priv->dev;

__ipoib_reap_ah(dev);

@@ -613,10 +614,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
return 0;
}

void ipoib_ib_dev_flush(void *_dev)
void ipoib_ib_dev_flush(struct work_struct *work)
{
struct net_device *dev = (struct net_device *)_dev;
struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
struct ipoib_dev_priv *cpriv, *priv =
container_of(work, struct ipoib_dev_priv, flush_task);
struct net_device *dev = priv->dev;

if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +640,14 @@ void ipoib_ib_dev_flush(void *_dev)
*/
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
ipoib_ib_dev_up(dev);
ipoib_mcast_restart_task(dev);
ipoib_mcast_restart_task(&priv->restart_task);
}

mutex_lock(&priv->vlan_mutex);

/* Flush any child interfaces too */
list_for_each_entry(cpriv, &priv->child_intfs, list)
ipoib_ib_dev_flush(cpriv->dev);
ipoib_ib_dev_flush(&cpriv->flush_task);

mutex_unlock(&priv->vlan_mutex);
}
@@ -672,10 +674,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
* change async notification is available.
*/

void ipoib_pkey_poll(void *dev_ptr)
void ipoib_pkey_poll(struct work_struct *work)
{
struct net_device *dev = dev_ptr;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, pkey_task.work);
struct net_device *dev = priv->dev;

ipoib_pkey_dev_check_presence(dev);

@@ -940,11 +940,11 @@ static void ipoib_setup(struct net_device *dev)
INIT_LIST_HEAD(&priv->dead_ahs);
INIT_LIST_HEAD(&priv->multicast_list);

INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev);
INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev);
INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev);
INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev);
INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)

@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status,
mcast->backoff = 1;
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
queue_work(ipoib_workqueue, &priv->mcast_task);
queue_delayed_work(ipoib_workqueue,
	&priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
complete(&mcast->done);
return;

@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status,

if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
if (status == -ETIMEDOUT)
queue_work(ipoib_workqueue, &priv->mcast_task);
queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
	0);
else
queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
	mcast->backoff * HZ);

@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
mcast->query_id = ret;
}

void ipoib_mcast_join_task(void *dev_ptr)
void ipoib_mcast_join_task(struct work_struct *work)
{
struct net_device *dev = dev_ptr;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_dev_priv *priv =
	container_of(work, struct ipoib_dev_priv, mcast_task.work);
struct net_device *dev = priv->dev;

if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;

@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)

mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
queue_work(ipoib_workqueue, &priv->mcast_task);
queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);

spin_lock_irq(&priv->lock);

@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}

void ipoib_mcast_restart_task(void *dev_ptr)
void ipoib_mcast_restart_task(struct work_struct *work)
{
struct net_device *dev = dev_ptr;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_dev_priv *priv =
	container_of(work, struct ipoib_dev_priv, restart_task);
struct net_device *dev = priv->dev;
struct dev_mc_list *mclist;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);

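The multicast hunks show the queueing side: queue_work() can no longer take a struct delayed_work, so the immediate case becomes queue_delayed_work() with a zero delay, sitting alongside the existing backoff case. A sketch, where wq and dwork are stand-ins for ipoib_workqueue and &priv->mcast_task:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void kick_task(struct workqueue_struct *wq,
		      struct delayed_work *dwork, unsigned long delay)
{
	/* delay == 0 requests an immediate run, replacing queue_work();
	 * queue_delayed_work() returns 0 if the item was already pending. */
	if (!queue_delayed_work(wq, dwork, delay))
		pr_debug("work item already pending, not requeued\n");
}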
@ -48,7 +48,7 @@

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
static void iser_comp_error_worker(void *data);
static void iser_comp_error_worker(struct work_struct *work);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{

@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn)
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
	ib_conn);
INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);

@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc)
return ret_val;
}

static void iser_comp_error_worker(void *data)
static void iser_comp_error_worker(struct work_struct *work)
{
struct iser_conn *ib_conn = data;
struct iser_conn *ib_conn =
	container_of(work, struct iser_conn, comperror_work);

/* getting here when the state is UP means that the conn is being *
 * terminated asynchronously from the iSCSI layer's perspective. */

@ -390,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
wait_for_completion(&target->done);
}

static void srp_remove_work(void *target_ptr)
static void srp_remove_work(struct work_struct *work)
{
struct srp_target_port *target = target_ptr;
struct srp_target_port *target =
	container_of(work, struct srp_target_port, work);

spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {

@ -575,7 +576,7 @@ err:
spin_lock_irq(target->scsi_host->host_lock);
if (target->state == SRP_TARGET_CONNECTING) {
target->state = SRP_TARGET_DEAD;
INIT_WORK(&target->work, srp_remove_work, target);
INIT_WORK(&target->work, srp_remove_work);
schedule_work(&target->work);
}
spin_unlock_irq(target->scsi_host->host_lock);

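The srp hunks show a one-shot use: the item is initialized right before it is scheduled, under the host lock that guards the state transition, and the handler finds its target through container_of() instead of a stashed pointer. A sketch with hypothetical names (my_target, MY_CONNECTING, MY_DEAD are illustrative):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

enum my_state { MY_CONNECTING, MY_DEAD };

struct my_target {
	spinlock_t lock;
	enum my_state state;
	struct work_struct remove_work;
};

static void my_remove_work(struct work_struct *work)
{
	struct my_target *t = container_of(work, struct my_target, remove_work);

	/* ... tear the target down outside the lock ... */
}

static void my_mark_dead(struct my_target *t)
{
	spin_lock_irq(&t->lock);
	if (t->state == MY_CONNECTING) {
		t->state = MY_DEAD;
		INIT_WORK(&t->remove_work, my_remove_work);
		schedule_work(&t->remove_work);	/* shared system workqueue */
	}
	spin_unlock_irq(&t->lock);
}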
@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd)
* interrupt context.
*/

static void atkbd_event_work(void *data)
static void atkbd_event_work(struct work_struct *work)
{
struct atkbd *atkbd = data;
struct atkbd *atkbd = container_of(work, struct atkbd, event_work);

mutex_lock(&atkbd->event_mutex);

@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)

atkbd->dev = dev;
ps2_init(&atkbd->ps2dev, serio);
INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
INIT_WORK(&atkbd->event_work, atkbd_event_work);
mutex_init(&atkbd->event_mutex);

switch (serio->id.type) {

@ -572,9 +572,9 @@ lkkbd_event (struct input_dev *dev, unsigned int type, unsigned int code,
* were in.
*/
static void
lkkbd_reinit (void *data)
lkkbd_reinit (struct work_struct *work)
{
struct lkkbd *lk = data;
struct lkkbd *lk = container_of(work, struct lkkbd, tq);
int division;
unsigned char leds_on = 0;
unsigned char leds_off = 0;

@ -651,7 +651,7 @@ lkkbd_connect (struct serio *serio, struct serio_driver *drv)

lk->serio = serio;
lk->dev = input_dev;
INIT_WORK (&lk->tq, lkkbd_reinit, lk);
INIT_WORK (&lk->tq, lkkbd_reinit);
lk->bell_volume = bell_volume;
lk->keyclick_volume = keyclick_volume;
lk->ctrlclick_volume = ctrlclick_volume;

@ -208,9 +208,9 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
* were in.
*/

static void sunkbd_reinit(void *data)
static void sunkbd_reinit(struct work_struct *work)
{
struct sunkbd *sunkbd = data;
struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);

wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);

@ -248,7 +248,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv)
sunkbd->serio = serio;
sunkbd->dev = input_dev;
init_waitqueue_head(&sunkbd->wait);
INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd);
INIT_WORK(&sunkbd->tq, sunkbd_reinit);
snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);

serio_set_drvdata(serio, sunkbd);

@ -888,9 +888,10 @@ static int psmouse_poll(struct psmouse *psmouse)
* psmouse_resync() attempts to re-validate current protocol.
*/

static void psmouse_resync(void *p)
static void psmouse_resync(struct work_struct *work)
{
struct psmouse *psmouse = p, *parent = NULL;
struct psmouse *parent = NULL, *psmouse =
	container_of(work, struct psmouse, resync_work);
struct serio *serio = psmouse->ps2dev.serio;
psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
int failed = 0, enabled = 0;

@ -1121,7 +1122,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
goto out;

ps2_init(&psmouse->ps2dev, serio);
INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
INIT_WORK(&psmouse->resync_work, psmouse_resync);
psmouse->dev = input_dev;
snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);

@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command);
* ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
*/

static void ps2_execute_scheduled_command(void *data)
static void ps2_execute_scheduled_command(struct work_struct *work)
{
struct ps2work *ps2work = data;
struct ps2work *ps2work = container_of(work, struct ps2work, work);

ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
kfree(ps2work);

@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman
ps2work->ps2dev = ps2dev;
ps2work->command = command;
memcpy(ps2work->param, param, send);
INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work);
INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);

if (!schedule_work(&ps2work->work)) {
kfree(ps2work);

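The libps2 hunks exercise the same conversion for a dynamically allocated work item: because the handler can recover its container without any extra context, it can also safely kfree() it, as ps2_execute_scheduled_command() does. A sketch under hypothetical names (my_cmd, my_cmd_run, my_cmd_submit are illustrative):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_cmd {
	struct work_struct work;	/* embedded, so container_of() works */
	int code;
};

static void my_cmd_run(struct work_struct *work)
{
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	/* ... execute cmd->code ... */
	kfree(cmd);			/* the handler owns its container */
}

static int my_cmd_submit(int code)
{
	struct my_cmd *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);

	if (!cmd)
		return -ENOMEM;
	cmd->code = code;
	INIT_WORK(&cmd->work, my_cmd_run);
	if (!schedule_work(&cmd->work)) {	/* 0 means already queued */
		kfree(cmd);
		return -EBUSY;
	}
	return 0;
}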