Commit 65f27f38 authored by David Howells

WorkStruct: Pass the work_struct pointer instead of context data

Pass the work_struct pointer to the work function rather than context data.
The work function can use container_of() to work out the data.
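
As an illustrative sketch of the conversion (struct foo and foo_handler are
made-up names, not taken from any one driver touched below):

        struct foo {
                int value;
                struct work_struct work;        /* embedded in its container */
        };

        /* Old style: a context pointer was stored in the work_struct and
         * passed straight to the function. */
        static void foo_handler(void *data)
        {
                struct foo *foo = data;
                /* ... use foo->value ... */
        }
        INIT_WORK(&foo->work, foo_handler, foo);

        /* New style: only the work_struct pointer is passed; the function
         * recovers its container with container_of(). */
        static void foo_handler(struct work_struct *work)
        {
                struct foo *foo = container_of(work, struct foo, work);
                /* ... use foo->value ... */
        }
        INIT_WORK(&foo->work, foo_handler);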

For the cases where the container of the work_struct may go away the moment the
pending bit is cleared, it is made possible to defer the release of the
structure by deferring the clearing of the pending bit.

To make this work, an extra flag is introduced into the management side of the
work_struct.  This governs auto-release of the structure upon execution.

Ordinarily, the work queue executor would release the work_struct for further
scheduling or deallocation by clearing the pending bit prior to jumping to the
work function.  This means that, unless the driver makes some guarantee itself
that the work_struct won't go away, the work function may not access anything
else in the work_struct or its container lest they be deallocated.  This is a
problem if the auxiliary data is taken away (as done by the last patch).

However, if the pending bit is *not* cleared before jumping to the work
function, then the work function *may* access the work_struct and its container
with no problems.  But then the work function must itself release the
work_struct by calling work_release().

In most cases, automatic release is fine, so this is the default.  Special
initialisers exist for the non-auto-release case (their names end in _NAR).
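
For the non-auto-release case, a minimal sketch using a _NAR initialiser
(struct bar, bar_func and the kfree() policy are illustrative assumptions,
not code from this patch):

        static void bar_func(struct work_struct *work)
        {
                struct bar *bar = container_of(work, struct bar, work);

                /* The pending bit is still set here, so neither the
                 * work_struct nor its container can be freed or requeued
                 * under us while we are still using them. */

                work_release(work);     /* clear the pending bit ourselves */
                kfree(bar);             /* only now may the container go away */
        }

        INIT_WORK_NAR(&bar->work, bar_func);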
Signed-off-by: David Howells <dhowells@redhat.com>
parent 365970a1
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
*/
static int check_interval = 5 * 60; /* 5 minutes */
static void mcheck_timer(void *data);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer, NULL);
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
static void mcheck_check_cpu(void *info)
{
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
do_machine_check(NULL, 0);
}
static void mcheck_timer(void *data)
static void mcheck_timer(struct work_struct *work)
{
on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
schedule_delayed_work(&mcheck_work, check_interval * HZ);
......
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
}
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
void do_fork_idle(void *_c_idle)
void do_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle = _c_idle;
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
int timeout;
unsigned long start_rip;
struct create_idle c_idle = {
.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
DECLARE_WORK(work, do_fork_idle, &c_idle);
/* allocate memory for gdts of secondary cpus. Hotplug is considered */
if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
* thread.
*/
if (!keventd_up() || current_is_keventd())
work.func(work.data);
c_idle.work.func(&c_idle.work);
else {
schedule_work(&work);
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
}
......
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
static void handle_cpufreq_delayed_get(void *v)
static void handle_cpufreq_delayed_get(struct work_struct *v)
{
unsigned int cpu;
for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
static int __init cpufreq_tsc(void)
{
INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER))
cpufreq_init = 1;
......
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
*
* FIXME! dispatch queue is not a queue at all!
*/
static void as_work_handler(void *data)
static void as_work_handler(struct work_struct *work)
{
struct request_queue *q = data;
struct as_data *ad = container_of(work, struct as_data, antic_work);
struct request_queue *q = ad->q;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
ad->antic_timer.function = as_antic_timeout;
ad->antic_timer.data = (unsigned long)q;
init_timer(&ad->antic_timer);
INIT_WORK(&ad->antic_work, as_work_handler, q);
INIT_WORK(&ad->antic_work, as_work_handler);
INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
......
@@ -1841,9 +1841,11 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
return 1;
}
static void cfq_kick_queue(void *data)
static void cfq_kick_queue(struct work_struct *work)
{
request_queue_t *q = data;
struct cfq_data *cfqd =
container_of(work, struct cfq_data, unplug_work);
request_queue_t *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -1987,7 +1989,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->idle_class_timer.function = cfq_idle_class_timer;
cfqd->idle_class_timer.data = (unsigned long) cfqd;
INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
......
@@ -34,7 +34,7 @@
*/
#include <scsi/scsi_cmnd.h>
static void blk_unplug_work(void *data);
static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
if (q->unplug_delay == 0)
q->unplug_delay = 1;
INIT_WORK(&q->unplug_work, blk_unplug_work, q);
INIT_WORK(&q->unplug_work, blk_unplug_work);
q->unplug_timer.function = blk_unplug_timeout;
q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
}
}
static void blk_unplug_work(void *data)
static void blk_unplug_work(struct work_struct *work)
{
request_queue_t *q = data;
request_queue_t *q = container_of(work, request_queue_t, unplug_work);
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
......
@@ -40,9 +40,10 @@ struct cryptomgr_param {
char template[CRYPTO_MAX_ALG_NAME];
};
static void cryptomgr_probe(void *data)
static void cryptomgr_probe(struct work_struct *work)
{
struct cryptomgr_param *param = data;
struct cryptomgr_param *param =
container_of(work, struct cryptomgr_param, work);
struct crypto_template *tmpl;
struct crypto_instance *inst;
int err;
@@ -112,7 +113,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
param->larval.type = larval->alg.cra_flags;
param->larval.mask = larval->mask;
INIT_WORK(&param->work, cryptomgr_probe, param);
INIT_WORK(&param->work, cryptomgr_probe);
schedule_work(&param->work);
return NOTIFY_STOP;
......
@@ -50,6 +50,7 @@ ACPI_MODULE_NAME("osl")
struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
struct work_struct work;
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
static void acpi_os_execute_deferred(void *context)
static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = NULL;
dpc = (struct acpi_os_dpc *)context;
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
if (!dpc) {
printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
return;
@@ -602,7 +600,6 @@ acpi_status acpi_os_execute(acpi_execute_type type,
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct work_struct *task;
ACPI_FUNCTION_TRACE("os_queue_for_execution");
@@ -615,28 +612,22 @@ acpi_status acpi_os_execute(acpi_execute_type type,
/*
* Allocate/initialize DPC structure. Note that this memory will be
* freed by the callee. The kernel handles the tq_struct list in a
* freed by the callee. The kernel handles the work_struct list in a
* way that allows us to also free its memory inside the callee.
* Because we may want to schedule several tasks with different
* parameters we can't use the approach some kernel code uses of
* having a static tq_struct.
* We can save time and code by allocating the DPC and tq_structs
* from the same memory.
* having a static work_struct.
*/
dpc =
kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
GFP_ATOMIC);
dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
if (!dpc)
return_ACPI_STATUS(AE_NO_MEMORY);
dpc->function = function;
dpc->context = context;
task = (void *)(dpc + 1);
INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
if (!queue_work(kacpid_wq, task)) {
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
if (!queue_work(kacpid_wq, &dpc->work)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Call to queue_work() failed.\n"));
kfree(dpc);
......
@@ -914,7 +914,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
* ata_port_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
* @fn: workqueue function to be scheduled
* @data: data value to pass to workqueue function
* @data: data for @fn to use
* @delay: delay time for workqueue function
*
* Schedule @fn(@data) for execution after @delay jiffies using
@@ -929,7 +929,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
* LOCKING:
* Inherited from caller.
*/
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
unsigned long delay)
{
int rc;
@@ -937,7 +937,8 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
return;
PREPARE_DELAYED_WORK(&ap->port_task, fn, data);
PREPARE_DELAYED_WORK(&ap->port_task, fn);
ap->port_task_data = data;
rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
@@ -4292,10 +4293,11 @@ int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
return poll_next;
}
static void ata_pio_task(void *_data)
static void ata_pio_task(struct work_struct *work)
{
struct ata_queued_cmd *qc = _data;
struct ata_port *ap = qc->ap;
struct ata_port *ap =
container_of(work, struct ata_port, port_task.work);
struct ata_queued_cmd *qc = ap->port_task_data;
u8 status;
int poll_next;
@@ -5317,9 +5319,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
INIT_DELAYED_WORK(&ap->port_task, NULL, NULL);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
INIT_DELAYED_WORK(&ap->port_task, NULL);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
......
@@ -3079,7 +3079,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
/**
* ata_scsi_hotplug - SCSI part of hotplug
* @data: Pointer to ATA port to perform SCSI hotplug on
* @work: Pointer to ATA port to perform SCSI hotplug on
*
* Perform SCSI part of hotplug. It's executed from a separate
* workqueue after EH completes. This is necessary because SCSI
@@ -3089,9 +3089,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_scsi_hotplug(void *data)
void ata_scsi_hotplug(struct work_struct *work)
{
struct ata_port *ap = data;
struct ata_port *ap =
container_of(work, struct ata_port, hotplug_task.work);
int i;
if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3190,7 +3191,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
/**
* ata_scsi_dev_rescan - initiate scsi_rescan_device()
* @data: Pointer to ATA port to perform scsi_rescan_device()
* @work: Pointer to ATA port to perform scsi_rescan_device()
*
* After ATA pass thru (SAT) commands are executed successfully,
* libata need to propagate the changes to SCSI layer. This
@@ -3200,9 +3201,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_scsi_dev_rescan(void *data)
void ata_scsi_dev_rescan(struct work_struct *work)
{
struct ata_port *ap = data;
struct ata_port *ap =
container_of(work, struct ata_port, scsi_rescan_task);
struct ata_device *dev;
unsigned int i;
......
@@ -81,7 +81,7 @@ extern struct scsi_transport_template ata_scsi_transport_template;
extern void ata_scsi_scan_host(struct ata_port *ap);
extern int ata_scsi_offline_dev(struct ata_device *dev);
extern void ata_scsi_hotplug(void *data);
extern void ata_scsi_hotplug(struct work_struct *work);
extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
@@ -111,7 +111,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
unsigned int (*actor) (struct ata_scsi_args *args,
u8 *rbuf, unsigned int buflen));
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(void *data);
extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_bus_probe(struct ata_port *ap);
/* libata-eh.c */
......
@@ -992,11 +992,11 @@ static void empty(void)
{
}
static DECLARE_WORK(floppy_work, NULL, NULL);
static DECLARE_WORK(floppy_work, NULL);
static void schedule_bh(void (*handler) (void))
{
PREPARE_WORK(&floppy_work, (work_func_t)handler, NULL);
PREPARE_WORK(&floppy_work, (work_func_t)handler);
schedule_work(&floppy_work);
}
@@ -1008,7 +1008,7 @@ static void cancel_activity(void)
spin_lock_irqsave(&floppy_lock, flags);
do_floppy = NULL;
PREPARE_WORK(&floppy_work, (work_func_t)empty, NULL);
PREPARE_WORK(&floppy_work, (work_func_t)empty);
del_timer(&fd_timer);
spin_unlock_irqrestore(&floppy_lock, flags);
}
......
@@ -1422,9 +1422,9 @@ static struct keydata {
static unsigned int ip_cnt;
static void rekey_seq_generator(void *private_);
static void rekey_seq_generator(struct work_struct *work);
static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL);
static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
/*
* Lock avoidance:
@@ -1438,7 +1438,7 @@ static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
* happen, and even if that happens only a not perfectly compliant
* ISN is generated, nothing fatal.
*/
static void rekey_seq_generator(void *private_)
static void rekey_seq_generator(struct work_struct *work)
{
struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
......
@@ -219,13 +219,13 @@ static struct sysrq_key_op sysrq_term_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
static void moom_callback(void *ignored)
static void moom_callback(struct work_struct *ignored)
{
out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
GFP_KERNEL, 0);
}
static DECLARE_WORK(moom_work, moom_callback, NULL);
static DECLARE_WORK(moom_work, moom_callback);
static void sysrq_handle_moom(int key, struct tty_struct *tty)
{
......
@@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
/**
* do_tty_hangup - actual handler for hangup events
* @data: tty device
* @work: tty device
*
* This can be called by the "eventd" kernel thread. That is process
* synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
* tasklist_lock to walk task list for hangup event
*
*/
static void do_tty_hangup(void *data)
static void do_tty_hangup(struct work_struct *work)
{
struct tty_struct *tty = (struct tty_struct *) data;
struct tty_struct *tty =
container_of(work, struct tty_struct, hangup_work);
struct file * cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
@@ -1433,7 +1434,7 @@ void tty_vhangup(struct tty_struct * tty)
printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
#endif
do_tty_hangup((void *) tty);
do_tty_hangup(&tty->hangup_work);
}
EXPORT_SYMBOL(tty_vhangup);
@@ -3304,12 +3305,13 @@ int tty_ioctl(struct inode * inode, struct file * file,
* Nasty bug: do_SAK is being called in interrupt context. This can
* deadlock. We punt it up to process context. AKPM - 16Mar2001
*/
static void __do_SAK(void *arg)
static void __do_SAK(struct work_struct *work)
{
struct tty_struct *tty =
container_of(work, struct tty_struct, SAK_work);
#ifdef TTY_SOFT_SAK
tty_hangup(tty);
#else
struct tty_struct *tty = arg;
struct task_struct *g, *p;
int session;
int i;
@@ -3388,7 +3390,7 @@ void do_SAK(struct tty_struct *tty)
{
if (!tty)
return;
PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
PREPARE_WORK(&tty->SAK_work, __do_SAK);
schedule_work(&tty->SAK_work);
}
@@ -3396,7 +3398,7 @@ EXPORT_SYMBOL(do_SAK);
/**
* flush_to_ldisc
* @private_: tty structure passed from work queue.
* @work: tty structure passed from work queue.
*
* This routine is called out of the software interrupt to flush data
* from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@ EXPORT_SYMBOL(do_SAK);
* receive_buf method is single threaded for each tty instance.
*/
static void flush_to_ldisc(void *private_)
static void flush_to_ldisc(struct work_struct *work)
{
struct tty_struct *tty = (struct tty_struct *) private_;
struct tty_struct *tty =
container_of(work, struct tty_struct, buf.work.work);
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@ void tty_flip_buffer_push(struct tty_struct *tty)
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
flush_to_ldisc((void *) tty);
flush_to_ldisc(&tty->buf.work.work);
else
schedule_delayed_work(&tty->buf.work, 1);
}
@@ -3580,17 +3583,17 @@ static void initialize_tty_struct(struct tty_struct *tty)
tty->overrun_time = jiffies;
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc, tty);
INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
init_MUTEX(&tty->buf.pty_sem);
mutex_init(&tty->termios_mutex);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
INIT_WORK(&tty->hangup_work, do_tty_hangup);
mutex_init(&tty->atomic_read_lock);
mutex_init(&tty->atomic_write_lock);
spin_lock_init(&tty->read_lock);
INIT_LIST_HEAD(&tty->tty_files);
INIT_WORK(&tty->SAK_work, NULL, NULL);
INIT_WORK(&tty->SAK_work, NULL);
}
/*
......
@@ -155,7 +155,7 @@ static void con_flush_chars(struct tty_struct *tty);
static void set_vesa_blanking(char __user *p);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(void *ignored);
static void console_callback(struct work_struct *ignored);
static void blank_screen_t(unsigned long dummy);
static void set_palette(struct vc_data *vc);
@@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
static int blankinterval = 10*60*HZ;
static int vesa_off_interval;
static DECLARE_WORK(console_work, console_callback, NULL);
static DECLARE_WORK(console_work, console_callback);
/*
* fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
* with other console code and prevention of re-entrancy is
* ensured with console_sem.
*/
static void console_callback(void *ignored)
static void console_callback(struct work_struct *ignored)
{
acquire_console_sem();
......
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static void handle_update(void *data);
static void handle_update(struct work_struct *work);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
mutex_init(&policy->lock);
mutex_lock(&policy->lock);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
INIT_WORK(&policy->update, handle_update);
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
}
static void handle_update(void *data)
static void handle_update(struct work_struct *work)
{
unsigned int cpu = (unsigned int)(long)data;
struct cpufreq_policy *policy =
container_of(work, struct cpufreq_policy, update);
unsigned int cpu = policy->cpu;
dprintk("handle_update for cpu %u called\n", cpu);
cpufreq_update_policy(cpu);
}
......
@@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd)
* interrupt context.
*/
static void atkbd_event_work(void *data)
static void atkbd_event_work(struct work_struct *work)
{
struct atkbd *atkbd = data;
struct atkbd *atkbd = container_of(work, struct atkbd, event_work);
mutex_lock(&atkbd->event_mutex);
@@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->dev = dev;
ps2_init(&atkbd->ps2dev, serio);
INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
INIT_WORK(&atkbd->event_work, atkbd_event_work);
mutex_init(&atkbd->event_mutex);
switch (serio->id.type) {
......
@@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command);
* ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
*/
static void ps2_execute_scheduled_command(void *data)
static void ps2_execute_scheduled_command(struct work_struct *work)
{
struct ps2work *ps2work = data;
struct ps2work *ps2work = container_of(work, struct ps2work, work);
ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
kfree(ps2work);
@@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman
ps2work->ps2dev = ps2dev;
ps2work->command = command;
memcpy(ps2work->param, param, send);
INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work);
INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);
if (!schedule_work(&ps2work->work)) {
kfree(ps2work);
......
@@ -183,7 +183,7 @@ void e1000_set_ethtool_ops(struct net_device *netdev);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
@@ -908,8 +908,7 @@ e1000_probe(struct pci_dev *pdev,
adapter->phy_info_timer.function = &e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long) adapter;
INIT_WORK(&adapter->reset_task,
(void (*)(void *))e1000_reset_task, netdev);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
e1000_check_options(adapter);
@@ -3154,9 +3153,10 @@ e1000_tx_timeout(struct net_device *netdev)
}
static void
e1000_reset_task(struct net_device *netdev)
e1000_reset_task(struct work_struct *work)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_adapter *adapter =
container_of(work, struct e1000_adapter, reset_task);
e1000_reinit_locked(adapter);
}
......
@@ -160,7 +160,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
rpc->e_lock = SPIN_LOCK_UNLOCKED;
rpc->rpd = dev;
INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev);
INIT_WORK(&rpc->dpc_handler, aer_isr);
rpc->prod_idx = rpc->cons_idx = 0;
mutex_init(&rpc->rpc_mutex);
init_waitqueue_head(&rpc->wait_release);
......
@@ -118,7 +118,7 @@ extern struct bus_type pcie_port_bus_type;
extern void aer_enable_rootport(struct aer_rpc *rpc);
extern void aer_delete_rootport(struct aer_rpc *rpc);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(void *context);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
extern int aer_osc_setup(struct pci_dev *dev);
......
@@ -690,14 +690,14 @@ static void aer_isr_one_error(struct pcie_device *p_device,
/**
* aer_isr - consume errors detected by root port
* @context: pointer to a private data of pcie device
* @work: definition of this work item
*
* Invoked, as DPC, when root port records new detected error
**/
void aer_isr(void *context)
void aer_isr(struct work_struct *work)
{
struct pcie_device *p_device = (struct pcie_device *) context;
struct aer_rpc *rpc = get_service_data(p_device);
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
struct pcie_device *p_device = rpc->rpd;
struct aer_err_source *e_src;
mutex_lock(&rpc->rpc_mutex);
......
@@ -362,9 +362,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
goto retry;
}
static void scsi_target_reap_usercontext(void *data)
static void scsi_target_reap_usercontext(struct work_struct *work)
{
struct scsi_target *starget = data;
struct scsi_target *starget =
container_of(work, struct scsi_target, ew.work);
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);