/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * See Documentation/slow-work.txt
 */

#ifndef _LINUX_SLOW_WORK_H
#define _LINUX_SLOW_WORK_H

#ifdef CONFIG_SLOW_WORK

#include <linux/sysctl.h>
#include <linux/timer.h>

struct slow_work;

/*
 * The operations used to support slow work items
 */
struct slow_work_ops {
	/* owner */
	struct module *owner;

	/* get a ref on a work item
	 * - return 0 if successful, -ve if not
	 */
	int (*get_ref)(struct slow_work *work);

	/* discard a ref to a work item */
	void (*put_ref)(struct slow_work *work);

	/* execute a work item */
	void (*execute)(struct slow_work *work);
};
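
/*
 * Example of filling in an ops table (an illustrative sketch only; "myobj",
 * the myobj_*() callbacks and myobj_release() are hypothetical names, not
 * part of this API).  A user typically embeds a struct slow_work in its own
 * refcounted object and points get_ref/put_ref at that object's refcount:
 *
 *	struct myobj {
 *		struct kref		ref;
 *		struct slow_work	work;
 *	};
 *
 *	static int myobj_get_ref(struct slow_work *work)
 *	{
 *		kref_get(&container_of(work, struct myobj, work)->ref);
 *		return 0;
 *	}
 *
 *	static void myobj_put_ref(struct slow_work *work)
 *	{
 *		kref_put(&container_of(work, struct myobj, work)->ref,
 *			 myobj_release);
 *	}
 *
 *	static void myobj_execute(struct slow_work *work)
 *	{
 *		struct myobj *obj = container_of(work, struct myobj, work);
 *		// the slow, sleepable part of the work on obj is done here
 *	}
 *
 *	static const struct slow_work_ops myobj_slow_work_ops = {
 *		.owner	 = THIS_MODULE,
 *		.get_ref = myobj_get_ref,
 *		.put_ref = myobj_put_ref,
 *		.execute = myobj_execute,
 *	};
 */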

/*
 * A slow work item
 * - A reference is held on the parent object by the thread pool when it is
 *   queued
 */
struct slow_work {
	struct module *owner;		/* the owning module */
	unsigned long flags;
#define SLOW_WORK_PENDING	0	/* item pending (further) execution */
#define SLOW_WORK_EXECUTING	1	/* item currently executing */
#define SLOW_WORK_ENQ_DEFERRED	2	/* item enqueue deferred */
#define SLOW_WORK_VERY_SLOW	3	/* item is very slow */
#define SLOW_WORK_CANCELLING	4	/* item is being cancelled, don't enqueue */
#define SLOW_WORK_DELAYED	5	/* item is struct delayed_slow_work with active timer */
	const struct slow_work_ops *ops; /* operations table for this item */
	struct list_head link;		/* link in queue */
};

struct delayed_slow_work {
	struct slow_work work;
	struct timer_list timer;
};

/**
 * slow_work_init - Initialise a slow work item
 * @work: The work item to initialise
 * @ops: The operations to use to handle the slow work item
 *
 * Initialise a slow work item.
 */
static inline void slow_work_init(struct slow_work *work,
				  const struct slow_work_ops *ops)
{
	work->flags = 0;
	work->ops = ops;
	INIT_LIST_HEAD(&work->link);
}
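
/*
 * Example usage (illustrative only; "obj" and myobj_slow_work_ops refer to
 * the hypothetical names from the ops example above):
 *
 *	slow_work_init(&obj->work, &myobj_slow_work_ops);
 *	...
 *	if (slow_work_enqueue(&obj->work) < 0)
 *		goto error;	// e.g. get_ref() refused a reference
 *
 * Enqueueing an item that is already pending is a no-op; enqueueing an item
 * that is currently executing defers the requeue until execution finishes
 * (see SLOW_WORK_ENQ_DEFERRED above).
 */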

/**
 * delayed_slow_work_init - Initialise a delayed slow work item
 * @dwork: The delayed work item to initialise
 * @ops: The operations to use to handle the slow work item
 *
 * Initialise a delayed slow work item.
 */
static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
					   const struct slow_work_ops *ops)
{
	init_timer(&dwork->timer);
	slow_work_init(&dwork->work, ops);
}
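
/*
 * Example (illustrative only; "obj->dwork" is a hypothetical embedded
 * struct delayed_slow_work): a delayed item is set up the same way as an
 * ordinary one, but is queued with a timeout expressed in jiffies:
 *
 *	delayed_slow_work_init(&obj->dwork, &myobj_slow_work_ops);
 *	delayed_slow_work_enqueue(&obj->dwork, 5 * HZ);
 *
 * The item is not placed on the queue until the timer expires; it can be
 * cancelled beforehand with delayed_slow_work_cancel().
 */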

/**
 * vslow_work_init - Initialise a very slow work item
 * @work: The work item to initialise
 * @ops: The operations to use to handle the slow work item
 *
 * Initialise a very slow work item. This item will be restricted such that
 * only a certain number of the pool threads will be able to execute items of
 * this type.
 */
static inline void vslow_work_init(struct slow_work *work,
				   const struct slow_work_ops *ops)
{
	work->flags = 1 << SLOW_WORK_VERY_SLOW;
	work->ops = ops;
	INIT_LIST_HEAD(&work->link);
}
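
/*
 * Example (illustrative only): a very slow item uses the same ops table and
 * queueing calls as an ordinary item; only the initialiser differs:
 *
 *	vslow_work_init(&obj->work, &myobj_slow_work_ops);
 *	slow_work_enqueue(&obj->work);
 *
 * Marking the item SLOW_WORK_VERY_SLOW caps the number of pool threads that
 * may execute such items concurrently (the proportion is controlled via the
 * sysctls declared below; see Documentation/slow-work.txt).
 */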

extern int slow_work_enqueue(struct slow_work *work);
extern void slow_work_cancel(struct slow_work *work);
extern int slow_work_register_user(struct module *owner);
extern void slow_work_unregister_user(struct module *owner);
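
/*
 * Example (illustrative sketch, assuming a hypothetical module "myobj"): a
 * user of the facility must register before queueing any items and
 * unregister when it is done, so that the pool threads are kept running
 * whilst at least one user exists:
 *
 *	static int __init myobj_init(void)
 *	{
 *		int ret = slow_work_register_user(THIS_MODULE);
 *		if (ret < 0)
 *			return ret;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit myobj_exit(void)
 *	{
 *		...
 *		slow_work_unregister_user(THIS_MODULE);
 *	}
 */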

extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
				     unsigned long delay);

static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
{
	slow_work_cancel(&dwork->work);
}

#ifdef CONFIG_SYSCTL
extern ctl_table slow_work_sysctls[];
#endif

#endif /* CONFIG_SLOW_WORK */
#endif /* _LINUX_SLOW_WORK_H */