Merge branch 'block-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/block
* 'block-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/block:
  [PATCH] fix rmmod problems with elevator attributes, clean them up
  [PATCH] elevator_t lifetime rules and sysfs fixes
  [PATCH] noise removal: cfq-iosched.c
  [PATCH] don't bother with refcounting for cfq_data
  [PATCH] fix sysfs interaction and lifetime rules handling for queues
  [PATCH] regularize blk_cleanup_queue() use
  [PATCH] fix cfq_get_queue()/ioprio_set(2) races
  [PATCH] deal with rmmod/put_io_context() races
  [PATCH] stop elv_unregister() from rogering other iosched's data, fix locking
  [PATCH] stop cfq from pinning queue down
  [PATCH] make cfq_exit_queue() prune the cfq_io_context for that queue
  [PATCH] fix the exclusion for ioprio_set()
  [PATCH] keep sync and async cfq_queue separate
  [PATCH] switch to use of ->key to get cfq_data by cfq_io_context
  [PATCH] stop leaking cfq_data in cfq_set_request()
  [PATCH] fix cfq hash lookups
  [PATCH] fix locking in queue_requests_store()
  [PATCH] fix double-free in blk_init_queue_node()
  [PATCH] don't do exit_io_context() until we know we won't be doing any IO
commit a90779bfc8
block/as-iosched.c

@@ -182,6 +182,9 @@ struct as_rq {
 
 static kmem_cache_t *arq_pool;
 
+static atomic_t ioc_count = ATOMIC_INIT(0);
+static struct completion *ioc_gone;
+
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
 static void as_antic_stop(struct as_data *ad);
 
@@ -193,6 +196,15 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
         kfree(aic);
+        if (atomic_dec_and_test(&ioc_count) && ioc_gone)
+                complete(ioc_gone);
+}
+
+static void as_trim(struct io_context *ioc)
+{
+        if (ioc->aic)
+                free_as_io_context(ioc->aic);
+        ioc->aic = NULL;
 }
 
 /* Called when the task exits */
@@ -220,6 +232,7 @@ static struct as_io_context *alloc_as_io_context(void)
         ret->seek_total = 0;
         ret->seek_samples = 0;
         ret->seek_mean = 0;
+        atomic_inc(&ioc_count);
 }
 
         return ret;
@@ -1696,11 +1709,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 /*
  * sysfs parts below
  */
-struct as_fs_entry {
-        struct attribute attr;
-        ssize_t (*show)(struct as_data *, char *);
-        ssize_t (*store)(struct as_data *, const char *, size_t);
-};
 
 static ssize_t
 as_var_show(unsigned int var, char *page)
@@ -1717,8 +1725,9 @@ as_var_store(unsigned long *var, const char *page, size_t count)
         return count;
 }
 
-static ssize_t as_est_show(struct as_data *ad, char *page)
+static ssize_t est_time_show(elevator_t *e, char *page)
 {
+        struct as_data *ad = e->elevator_data;
         int pos = 0;
 
         pos += sprintf(page+pos, "%lu %% exit probability\n",
@@ -1734,21 +1743,23 @@ static ssize_t as_est_show(struct as_data *ad, char *page)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR) \
-static ssize_t __FUNC(struct as_data *ad, char *page) \
+static ssize_t __FUNC(elevator_t *e, char *page) \
 { \
+        struct as_data *ad = e->elevator_data; \
         return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
 }
-SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
-SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
-SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
+SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
-static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
 { \
-        int ret = as_var_store(__PTR, (page), count); \
+        struct as_data *ad = e->elevator_data; \
+        int ret = as_var_store(__PTR, (page), count); \
         if (*(__PTR) < (MIN)) \
                 *(__PTR) = (MIN); \
         else if (*(__PTR) > (MAX)) \
@@ -1756,90 +1767,26 @@ static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
         *(__PTR) = msecs_to_jiffies(*(__PTR)); \
         return ret; \
 }
-STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
-STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
-STORE_FUNCTION(as_read_batchexpire_store,
+STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
+STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
+STORE_FUNCTION(as_read_batch_expire_store,
                         &ad->batch_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_write_batchexpire_store,
+STORE_FUNCTION(as_write_batch_expire_store,
                         &ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
 #undef STORE_FUNCTION
 
-static struct as_fs_entry as_est_entry = {
-        .attr = {.name = "est_time", .mode = S_IRUGO },
-        .show = as_est_show,
-};
-static struct as_fs_entry as_readexpire_entry = {
-        .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
-        .show = as_readexpire_show,
-        .store = as_readexpire_store,
-};
-static struct as_fs_entry as_writeexpire_entry = {
-        .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
-        .show = as_writeexpire_show,
-        .store = as_writeexpire_store,
-};
-static struct as_fs_entry as_anticexpire_entry = {
-        .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
-        .show = as_anticexpire_show,
-        .store = as_anticexpire_store,
-};
-static struct as_fs_entry as_read_batchexpire_entry = {
-        .attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
-        .show = as_read_batchexpire_show,
-        .store = as_read_batchexpire_store,
-};
-static struct as_fs_entry as_write_batchexpire_entry = {
-        .attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
-        .show = as_write_batchexpire_show,
-        .store = as_write_batchexpire_store,
-};
-
-static struct attribute *default_attrs[] = {
-        &as_est_entry.attr,
-        &as_readexpire_entry.attr,
-        &as_writeexpire_entry.attr,
-        &as_anticexpire_entry.attr,
-        &as_read_batchexpire_entry.attr,
-        &as_write_batchexpire_entry.attr,
-        NULL,
-};
-
-#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
-
-static ssize_t
-as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-        elevator_t *e = container_of(kobj, elevator_t, kobj);
-        struct as_fs_entry *entry = to_as(attr);
-
-        if (!entry->show)
-                return -EIO;
-
-        return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-as_attr_store(struct kobject *kobj, struct attribute *attr,
-               const char *page, size_t length)
-{
-        elevator_t *e = container_of(kobj, elevator_t, kobj);
-        struct as_fs_entry *entry = to_as(attr);
-
-        if (!entry->store)
-                return -EIO;
-
-        return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops as_sysfs_ops = {
-        .show   = as_attr_show,
-        .store  = as_attr_store,
-};
-
-static struct kobj_type as_ktype = {
-        .sysfs_ops      = &as_sysfs_ops,
-        .default_attrs  = default_attrs,
+#define AS_ATTR(name) \
+        __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
+
+static struct elv_fs_entry as_attrs[] = {
+        __ATTR_RO(est_time),
+        AS_ATTR(read_expire),
+        AS_ATTR(write_expire),
+        AS_ATTR(antic_expire),
+        AS_ATTR(read_batch_expire),
+        AS_ATTR(write_batch_expire),
+        __ATTR_NULL
 };
 
 static struct elevator_type iosched_as = {
@@ -1860,9 +1807,10 @@ static struct elevator_type iosched_as = {
         .elevator_may_queue_fn =        as_may_queue,
         .elevator_init_fn =             as_init_queue,
         .elevator_exit_fn =             as_exit_queue,
+        .trim =                         as_trim,
         },
 
-        .elevator_ktype =       &as_ktype,
+        .elevator_attrs = as_attrs,
         .elevator_name = "anticipatory",
         .elevator_owner = THIS_MODULE,
 };
@@ -1893,7 +1841,13 @@ static int __init as_init(void)
 
 static void __exit as_exit(void)
 {
+        DECLARE_COMPLETION(all_gone);
         elv_unregister(&iosched_as);
+        ioc_gone = &all_gone;
+        barrier();
+        if (atomic_read(&ioc_count))
+                complete(ioc_gone);
+        synchronize_rcu();
         kmem_cache_destroy(arq_pool);
 }
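Note on the as-iosched.c hunks above: free_as_io_context() and as_exit() introduce a counter-plus-completion teardown idiom from "[PATCH] deal with rmmod/put_io_context() races". Every live as_io_context holds a tick of ioc_count; module exit arms ioc_gone so the final free can signal it, and synchronize_rcu() runs before the slab cache is destroyed. Below is a minimal user-space sketch of the same shape (pthread and stdatomic stand-ins for the kernel's atomic_t and struct completion; names are illustrative, and the sketch's exit path shows the waiting side of the idiom, not a transcription of as_exit()):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int obj_count;                     /* like ioc_count */
    static pthread_mutex_t gone_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  gone_cond = PTHREAD_COND_INITIALIZER;
    static int all_gone;                 /* stands in for the armed completion */

    static void obj_get(void)
    {
            atomic_fetch_add(&obj_count, 1); /* like atomic_inc(&ioc_count) */
    }

    static void obj_put(void)
    {
            /* the last freer wakes the unloader, like complete(ioc_gone) */
            if (atomic_fetch_sub(&obj_count, 1) == 1) {
                    pthread_mutex_lock(&gone_lock);
                    all_gone = 1;
                    pthread_cond_signal(&gone_cond);
                    pthread_mutex_unlock(&gone_lock);
            }
    }

    static void module_exit_path(void)
    {
            /* wait until every outstanding object has been put */
            pthread_mutex_lock(&gone_lock);
            while (atomic_load(&obj_count) > 0 && !all_gone)
                    pthread_cond_wait(&gone_cond, &gone_lock);
            pthread_mutex_unlock(&gone_lock);
            puts("all contexts gone; safe to tear down caches");
    }

    int main(void)
    {
            obj_get();
            obj_put();
            module_exit_path();
            return 0;
    }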
block/cfq-iosched.c

@@ -6,21 +6,13 @@
  *
  *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
  */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
 #include <linux/config.h>
 #include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
 #include <linux/hash.h>
 #include <linux/rbtree.h>
-#include <linux/mempool.h>
 #include <linux/ioprio.h>
-#include <linux/writeback.h>
 
 /*
  * tunables
@@ -47,6 +39,8 @@ static int cfq_slice_idle = HZ / 100;
  */
 static const int cfq_max_depth = 2;
 
+static DEFINE_RWLOCK(cfq_exit_lock);
+
 /*
  * for the hash of cfqq inside the cfqd
  */
@@ -89,6 +83,9 @@ static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
 
+static atomic_t ioc_count = ATOMIC_INIT(0);
+static struct completion *ioc_gone;
+
 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_be(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
@@ -109,7 +106,6 @@ static kmem_cache_t *cfq_ioc_pool;
  * Per block device queue structure
  */
 struct cfq_data {
-        atomic_t ref;
         request_queue_t *queue;
 
         /*
@@ -175,6 +171,8 @@ struct cfq_data {
         unsigned int cfq_slice_async_rq;
         unsigned int cfq_slice_idle;
         unsigned int cfq_max_depth;
+
+        struct list_head cic_list;
 };
 
 /*
@@ -288,7 +286,7 @@ CFQ_CRQ_FNS(is_sync);
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
-static void cfq_put_cfqd(struct cfq_data *cfqd);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
 #define process_sync(tsk)       ((tsk)->flags & PF_SYNCWRITE)
 
@@ -1160,8 +1158,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
         if (unlikely(cfqd->active_queue == cfqq))
                 __cfq_slice_expired(cfqd, cfqq, 0);
 
-        cfq_put_cfqd(cfqq->cfqd);
-
         /*
          * it's on the empty list and still hashed
          */
@@ -1179,7 +1175,7 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
 
         hlist_for_each_safe(entry, next, hash_list) {
                 struct cfq_queue *__cfqq = list_entry_qhash(entry);
-                const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);
+                const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
 
                 if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
                         return __cfqq;
@@ -1198,13 +1194,24 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
 {
         struct cfq_io_context *__cic;
         struct list_head *entry, *next;
+        int freed = 1;
 
         list_for_each_safe(entry, next, &cic->list) {
                 __cic = list_entry(entry, struct cfq_io_context, list);
                 kmem_cache_free(cfq_ioc_pool, __cic);
+                freed++;
         }
 
         kmem_cache_free(cfq_ioc_pool, cic);
+        if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
+                complete(ioc_gone);
+}
+
+static void cfq_trim(struct io_context *ioc)
+{
+        ioc->set_ioprio = NULL;
+        if (ioc->cic)
+                cfq_free_io_context(ioc->cic);
 }
 
 /*
@@ -1212,25 +1219,37 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
  */
 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
-        struct cfq_data *cfqd = cic->cfqq->cfqd;
-        request_queue_t *q = cfqd->queue;
+        struct cfq_data *cfqd = cic->key;
+        request_queue_t *q;
+
+        if (!cfqd)
+                return;
+
+        q = cfqd->queue;
 
         WARN_ON(!irqs_disabled());
 
         spin_lock(q->queue_lock);
 
-        if (unlikely(cic->cfqq == cfqd->active_queue))
-                __cfq_slice_expired(cfqd, cic->cfqq, 0);
+        if (cic->cfqq[ASYNC]) {
+                if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
+                        __cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
+                cfq_put_queue(cic->cfqq[ASYNC]);
+                cic->cfqq[ASYNC] = NULL;
+        }
 
-        cfq_put_queue(cic->cfqq);
-        cic->cfqq = NULL;
+        if (cic->cfqq[SYNC]) {
+                if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
+                        __cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
+                cfq_put_queue(cic->cfqq[SYNC]);
+                cic->cfqq[SYNC] = NULL;
+        }
 
+        cic->key = NULL;
+        list_del_init(&cic->queue_list);
         spin_unlock(q->queue_lock);
 }
 
-/*
- * Another task may update the task cic list, if it is doing a queue lookup
- * on its behalf. cfq_cic_lock excludes such concurrent updates
- */
 static void cfq_exit_io_context(struct cfq_io_context *cic)
 {
         struct cfq_io_context *__cic;
@@ -1242,12 +1261,14 @@ static void cfq_exit_io_context(struct cfq_io_context *cic)
         /*
          * put the reference this task is holding to the various queues
          */
+        read_lock(&cfq_exit_lock);
         list_for_each(entry, &cic->list) {
                 __cic = list_entry(entry, struct cfq_io_context, list);
                 cfq_exit_single_io_context(__cic);
         }
 
         cfq_exit_single_io_context(cic);
+        read_unlock(&cfq_exit_lock);
         local_irq_restore(flags);
 }
 
@@ -1258,7 +1279,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 
         if (cic) {
                 INIT_LIST_HEAD(&cic->list);
-                cic->cfqq = NULL;
+                cic->cfqq[ASYNC] = NULL;
+                cic->cfqq[SYNC] = NULL;
                 cic->key = NULL;
                 cic->last_end_request = jiffies;
                 cic->ttime_total = 0;
@@ -1266,6 +1288,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
                 cic->ttime_mean = 0;
                 cic->dtor = cfq_free_io_context;
                 cic->exit = cfq_exit_io_context;
+                INIT_LIST_HEAD(&cic->queue_list);
+                atomic_inc(&ioc_count);
         }
 
         return cic;
@@ -1318,14 +1342,27 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
         cfq_clear_cfqq_prio_changed(cfqq);
 }
 
-static inline void changed_ioprio(struct cfq_queue *cfqq)
+static inline void changed_ioprio(struct cfq_io_context *cic)
 {
-        if (cfqq) {
-                struct cfq_data *cfqd = cfqq->cfqd;
-
+        struct cfq_data *cfqd = cic->key;
+        struct cfq_queue *cfqq;
+        if (cfqd) {
                 spin_lock(cfqd->queue->queue_lock);
-                cfq_mark_cfqq_prio_changed(cfqq);
-                cfq_init_prio_data(cfqq);
+                cfqq = cic->cfqq[ASYNC];
+                if (cfqq) {
+                        struct cfq_queue *new_cfqq;
+                        new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
+                                                cic->ioc->task, GFP_ATOMIC);
+                        if (new_cfqq) {
+                                cic->cfqq[ASYNC] = new_cfqq;
+                                cfq_put_queue(cfqq);
+                        }
+                }
+                cfqq = cic->cfqq[SYNC];
+                if (cfqq) {
+                        cfq_mark_cfqq_prio_changed(cfqq);
+                        cfq_init_prio_data(cfqq);
+                }
                 spin_unlock(cfqd->queue->queue_lock);
         }
 }
@@ -1335,24 +1372,32 @@ static inline void changed_ioprio(struct cfq_queue *cfqq)
  */
 static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 {
-        struct cfq_io_context *cic = ioc->cic;
+        struct cfq_io_context *cic;
+
+        write_lock(&cfq_exit_lock);
+
+        cic = ioc->cic;
 
-        changed_ioprio(cic->cfqq);
+        changed_ioprio(cic);
 
         list_for_each_entry(cic, &cic->list, list)
-                changed_ioprio(cic->cfqq);
+                changed_ioprio(cic);
+
+        write_unlock(&cfq_exit_lock);
 
         return 0;
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
+cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
               gfp_t gfp_mask)
 {
         const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
         struct cfq_queue *cfqq, *new_cfqq = NULL;
+        unsigned short ioprio;
 
 retry:
+        ioprio = tsk->ioprio;
         cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
 
         if (!cfqq) {
@@ -1381,7 +1426,6 @@ retry:
                 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
                 atomic_set(&cfqq->ref, 0);
                 cfqq->cfqd = cfqd;
-                atomic_inc(&cfqd->ref);
                 cfqq->service_last = 0;
                 /*
                  * set ->slice_left to allow preemption for a new process
@@ -1419,6 +1463,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
         if (!ioc)
                 return NULL;
 
+restart:
         if ((cic = ioc->cic) == NULL) {
                 cic = cfq_alloc_io_context(cfqd, gfp_mask);
 
@@ -1429,11 +1474,13 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
                  * manually increment generic io_context usage count, it
                  * cannot go away since we are already holding one ref to it
                  */
-                ioc->cic = cic;
-                ioc->set_ioprio = cfq_ioc_set_ioprio;
                 cic->ioc = ioc;
                 cic->key = cfqd;
-                atomic_inc(&cfqd->ref);
+                read_lock(&cfq_exit_lock);
+                ioc->set_ioprio = cfq_ioc_set_ioprio;
+                ioc->cic = cic;
+                list_add(&cic->queue_list, &cfqd->cic_list);
+                read_unlock(&cfq_exit_lock);
         } else {
                 struct cfq_io_context *__cic;
 
@@ -1443,6 +1490,20 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
                 if (cic->key == cfqd)
                         goto out;
 
+                if (unlikely(!cic->key)) {
+                        read_lock(&cfq_exit_lock);
+                        if (list_empty(&cic->list))
+                                ioc->cic = NULL;
+                        else
+                                ioc->cic = list_entry(cic->list.next,
+                                                      struct cfq_io_context,
                                                      list);
+                        read_unlock(&cfq_exit_lock);
+                        kmem_cache_free(cfq_ioc_pool, cic);
+                        atomic_dec(&ioc_count);
+                        goto restart;
+                }
+
                 /*
                  * cic exists, check if we already are there. linear search
                  * should be ok here, the list will usually not be more than
@@ -1457,6 +1518,14 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
                                 cic = __cic;
                                 goto out;
                         }
+                        if (unlikely(!__cic->key)) {
+                                read_lock(&cfq_exit_lock);
+                                list_del(&__cic->list);
+                                read_unlock(&cfq_exit_lock);
+                                kmem_cache_free(cfq_ioc_pool, __cic);
+                                atomic_dec(&ioc_count);
+                                goto restart;
+                        }
                 }
 
                 /*
@@ -1469,8 +1538,10 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 
                 __cic->ioc = ioc;
                 __cic->key = cfqd;
-                atomic_inc(&cfqd->ref);
+                read_lock(&cfq_exit_lock);
                 list_add(&__cic->list, &cic->list);
+                list_add(&__cic->queue_list, &cfqd->cic_list);
+                read_unlock(&cfq_exit_lock);
                 cic = __cic;
         }
 
@@ -1890,6 +1961,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
         struct cfq_queue *cfqq;
         struct cfq_rq *crq;
         unsigned long flags;
+        int is_sync = key != CFQ_KEY_ASYNC;
 
         might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -1900,14 +1972,14 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
         if (!cic)
                 goto queue_fail;
 
-        if (!cic->cfqq) {
-                cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
+        if (!cic->cfqq[is_sync]) {
+                cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
                 if (!cfqq)
                         goto queue_fail;
 
-                cic->cfqq = cfqq;
+                cic->cfqq[is_sync] = cfqq;
         } else
-                cfqq = cic->cfqq;
+                cfqq = cic->cfqq[is_sync];
 
         cfqq->allocated[rw]++;
         cfq_clear_cfqq_must_alloc(cfqq);
@@ -1924,7 +1996,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
         crq->cfq_queue = cfqq;
         crq->io_context = cic;
 
-        if (rw == READ || process_sync(tsk))
+        if (is_sync)
                 cfq_mark_crq_is_sync(crq);
         else
                 cfq_clear_crq_is_sync(crq);
@@ -2055,15 +2127,35 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
         blk_sync_queue(cfqd->queue);
 }
 
-static void cfq_put_cfqd(struct cfq_data *cfqd)
+static void cfq_exit_queue(elevator_t *e)
 {
+        struct cfq_data *cfqd = e->elevator_data;
         request_queue_t *q = cfqd->queue;
 
-        if (!atomic_dec_and_test(&cfqd->ref))
-                return;
+        cfq_shutdown_timer_wq(cfqd);
+        write_lock(&cfq_exit_lock);
+        spin_lock_irq(q->queue_lock);
+        if (cfqd->active_queue)
+                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
+        while(!list_empty(&cfqd->cic_list)) {
+                struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
+                                                        struct cfq_io_context,
                                                        queue_list);
+                if (cic->cfqq[ASYNC]) {
+                        cfq_put_queue(cic->cfqq[ASYNC]);
+                        cic->cfqq[ASYNC] = NULL;
+                }
+                if (cic->cfqq[SYNC]) {
+                        cfq_put_queue(cic->cfqq[SYNC]);
+                        cic->cfqq[SYNC] = NULL;
+                }
+                cic->key = NULL;
+                list_del_init(&cic->queue_list);
+        }
+        spin_unlock_irq(q->queue_lock);
+        write_unlock(&cfq_exit_lock);
 
         cfq_shutdown_timer_wq(cfqd);
-        blk_put_queue(q);
 
         mempool_destroy(cfqd->crq_pool);
         kfree(cfqd->crq_hash);
@@ -2071,14 +2163,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
         kfree(cfqd);
 }
 
-static void cfq_exit_queue(elevator_t *e)
-{
-        struct cfq_data *cfqd = e->elevator_data;
-
-        cfq_shutdown_timer_wq(cfqd);
-        cfq_put_cfqd(cfqd);
-}
-
 static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
         struct cfq_data *cfqd;
@@ -2097,6 +2181,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
         INIT_LIST_HEAD(&cfqd->cur_rr);
         INIT_LIST_HEAD(&cfqd->idle_rr);
         INIT_LIST_HEAD(&cfqd->empty_list);
+        INIT_LIST_HEAD(&cfqd->cic_list);
 
         cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
         if (!cfqd->crq_hash)
@@ -2118,7 +2203,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
         e->elevator_data = cfqd;
 
         cfqd->queue = q;
-        atomic_inc(&q->refcnt);
 
         cfqd->max_queued = q->nr_requests / 4;
         q->nr_batching = cfq_queued;
@@ -2133,8 +2217,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 
         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
 
-        atomic_set(&cfqd->ref, 1);
-
         cfqd->cfq_queued = cfq_queued;
         cfqd->cfq_quantum = cfq_quantum;
         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2193,11 +2275,6 @@ fail:
 /*
  * sysfs parts below -->
  */
-struct cfq_fs_entry {
-        struct attribute attr;
-        ssize_t (*show)(struct cfq_data *, char *);
-        ssize_t (*store)(struct cfq_data *, const char *, size_t);
-};
 
 static ssize_t
 cfq_var_show(unsigned int var, char *page)
@@ -2215,8 +2292,9 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \
+static ssize_t __FUNC(elevator_t *e, char *page) \
 { \
+        struct cfq_data *cfqd = e->elevator_data; \
         unsigned int __data = __VAR; \
         if (__CONV) \
                 __data = jiffies_to_msecs(__data); \
@@ -2226,8 +2304,8 @@ SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
-SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
-SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
@@ -2236,8 +2314,9 @@ SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
 { \
+        struct cfq_data *cfqd = e->elevator_data; \
         unsigned int __data; \
         int ret = cfq_var_store(&__data, (page), count); \
         if (__data < (MIN)) \
@@ -2254,8 +2333,8 @@ STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
@@ -2263,112 +2342,22 @@ STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX,
 STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
 #undef STORE_FUNCTION
 
-static struct cfq_fs_entry cfq_quantum_entry = {
-        .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_quantum_show,
-        .store = cfq_quantum_store,
-};
-static struct cfq_fs_entry cfq_queued_entry = {
-        .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_queued_show,
-        .store = cfq_queued_store,
-};
-static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
-        .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_fifo_expire_sync_show,
-        .store = cfq_fifo_expire_sync_store,
-};
-static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
-        .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_fifo_expire_async_show,
-        .store = cfq_fifo_expire_async_store,
-};
-static struct cfq_fs_entry cfq_back_max_entry = {
-        .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_back_max_show,
-        .store = cfq_back_max_store,
-};
-static struct cfq_fs_entry cfq_back_penalty_entry = {
-        .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_back_penalty_show,
-        .store = cfq_back_penalty_store,
-};
-static struct cfq_fs_entry cfq_slice_sync_entry = {
-        .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_slice_sync_show,
-        .store = cfq_slice_sync_store,
-};
-static struct cfq_fs_entry cfq_slice_async_entry = {
-        .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_slice_async_show,
-        .store = cfq_slice_async_store,
-};
-static struct cfq_fs_entry cfq_slice_async_rq_entry = {
-        .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_slice_async_rq_show,
-        .store = cfq_slice_async_rq_store,
-};
-static struct cfq_fs_entry cfq_slice_idle_entry = {
-        .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_slice_idle_show,
-        .store = cfq_slice_idle_store,
-};
-static struct cfq_fs_entry cfq_max_depth_entry = {
-        .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
-        .show = cfq_max_depth_show,
-        .store = cfq_max_depth_store,
-};
-
-static struct attribute *default_attrs[] = {
-        &cfq_quantum_entry.attr,
-        &cfq_queued_entry.attr,
-        &cfq_fifo_expire_sync_entry.attr,
-        &cfq_fifo_expire_async_entry.attr,
-        &cfq_back_max_entry.attr,
-        &cfq_back_penalty_entry.attr,
-        &cfq_slice_sync_entry.attr,
-        &cfq_slice_async_entry.attr,
-        &cfq_slice_async_rq_entry.attr,
-        &cfq_slice_idle_entry.attr,
-        &cfq_max_depth_entry.attr,
-        NULL,
-};
-
-#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
-
-static ssize_t
-cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-        elevator_t *e = container_of(kobj, elevator_t, kobj);
-        struct cfq_fs_entry *entry = to_cfq(attr);
-
-        if (!entry->show)
-                return -EIO;
-
-        return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-cfq_attr_store(struct kobject *kobj, struct attribute *attr,
-               const char *page, size_t length)
-{
-        elevator_t *e = container_of(kobj, elevator_t, kobj);
-        struct cfq_fs_entry *entry = to_cfq(attr);
-
-        if (!entry->store)
-                return -EIO;
-
-        return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops cfq_sysfs_ops = {
-        .show   = cfq_attr_show,
-        .store  = cfq_attr_store,
-};
-
-static struct kobj_type cfq_ktype = {
-        .sysfs_ops      = &cfq_sysfs_ops,
-        .default_attrs  = default_attrs,
+#define CFQ_ATTR(name) \
+        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
+
+static struct elv_fs_entry cfq_attrs[] = {
+        CFQ_ATTR(quantum),
+        CFQ_ATTR(queued),
+        CFQ_ATTR(fifo_expire_sync),
+        CFQ_ATTR(fifo_expire_async),
+        CFQ_ATTR(back_seek_max),
+        CFQ_ATTR(back_seek_penalty),
+        CFQ_ATTR(slice_sync),
+        CFQ_ATTR(slice_async),
+        CFQ_ATTR(slice_async_rq),
+        CFQ_ATTR(slice_idle),
+        CFQ_ATTR(max_depth),
+        __ATTR_NULL
 };
 
 static struct elevator_type iosched_cfq = {
@@ -2389,8 +2378,9 @@ static struct elevator_type iosched_cfq = {
         .elevator_may_queue_fn =        cfq_may_queue,
         .elevator_init_fn =             cfq_init_queue,
         .elevator_exit_fn =             cfq_exit_queue,
+        .trim =                         cfq_trim,
         },
-        .elevator_ktype =       &cfq_ktype,
+        .elevator_attrs =       cfq_attrs,
         .elevator_name =        "cfq",
         .elevator_owner =       THIS_MODULE,
 };
@@ -2419,7 +2409,13 @@ static int __init cfq_init(void)
 
 static void __exit cfq_exit(void)
 {
+        DECLARE_COMPLETION(all_gone);
         elv_unregister(&iosched_cfq);
+        ioc_gone = &all_gone;
+        barrier();
+        if (atomic_read(&ioc_count))
+                complete(ioc_gone);
+        synchronize_rcu();
         cfq_slab_kill();
 }
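Note on the cfq-iosched.c hunks above: the "keep sync and async cfq_queue separate" change replaces the single cic->cfqq pointer with a two-slot array indexed by an is_sync flag (computed as key != CFQ_KEY_ASYNC in cfq_set_request()), so one cfq_io_context can hold distinct sync and async queues, each freed and NULLed per direction in cfq_exit_single_io_context(). A simplified sketch of that lookup-or-create shape (stand-in types; get_queue() is a hypothetical allocator, not a kernel function):

    enum { ASYNC = 0, SYNC = 1 };

    struct queue;                   /* opaque stand-in for struct cfq_queue */

    struct io_ctx {
            struct queue *q[2];     /* like cic->cfqq[ASYNC] / cic->cfqq[SYNC] */
    };

    extern struct queue *get_queue(int is_sync);    /* hypothetical helper */

    static struct queue *ctx_queue(struct io_ctx *ctx, int is_sync)
    {
            /* lookup-or-create per direction, mirroring cfq_set_request() */
            if (!ctx->q[is_sync])
                    ctx->q[is_sync] = get_queue(is_sync);
            return ctx->q[is_sync];
    }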
block/deadline-iosched.c

@@ -694,11 +694,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 /*
  * sysfs parts below
  */
-struct deadline_fs_entry {
-        struct attribute attr;
-        ssize_t (*show)(struct deadline_data *, char *);
-        ssize_t (*store)(struct deadline_data *, const char *, size_t);
-};
 
 static ssize_t
 deadline_var_show(int var, char *page)
@@ -716,23 +711,25 @@ deadline_var_store(int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(struct deadline_data *dd, char *page) \
+static ssize_t __FUNC(elevator_t *e, char *page) \
 { \
-        int __data = __VAR; \
+        struct deadline_data *dd = e->elevator_data; \
+        int __data = __VAR; \
         if (__CONV) \
                 __data = jiffies_to_msecs(__data); \
         return deadline_var_show(__data, (page)); \
 }
-SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
-SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
-SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
-SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
-SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
+SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
+SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
+SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
+SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
+SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
 { \
+        struct deadline_data *dd = e->elevator_data; \
         int __data; \
        int ret = deadline_var_store(&__data, (page), count); \
         if (__data < (MIN)) \
@@ -745,83 +742,24 @@ static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)
         *(__PTR) = __data; \
         return ret; \
 }
-STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
-STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
-STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
+STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
+STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
+STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
 #undef STORE_FUNCTION
 
-static struct deadline_fs_entry deadline_readexpire_entry = {
-        .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
-        .show = deadline_readexpire_show,
-        .store = deadline_readexpire_store,
-};
-static struct deadline_fs_entry deadline_writeexpire_entry = {
-        .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
-        .show = deadline_writeexpire_show,
-        .store = deadline_writeexpire_store,
-};
-static struct deadline_fs_entry deadline_writesstarved_entry = {
-        .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
-        .show = deadline_writesstarved_show,
-        .store = deadline_writesstarved_store,
-};
-static struct deadline_fs_entry deadline_frontmerges_entry = {
-        .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
-        .show = deadline_frontmerges_show,
-        .store = deadline_frontmerges_store,
-};
-static struct deadline_fs_entry deadline_fifobatch_entry = {
-        .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
-        .show = deadline_fifobatch_show,
-        .store = deadline_fifobatch_store,
-};
-
-static struct attribute *default_attrs[] = {
-        &deadline_readexpire_entry.attr,
-        &deadline_writeexpire_entry.attr,
-        &deadline_writesstarved_entry.attr,
-        &deadline_frontmerges_entry.attr,
-        &deadline_fifobatch_entry.attr,
-        NULL,
-};
-
-#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
-
-static ssize_t
-deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-        elevator_t *e = container_of(kobj, elevator_t, kobj);
-        struct deadline_fs_entry *entry = to_deadline(attr);
-
-        if (!entry->show)
-                return -EIO;
-
-        return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-deadline_attr_store(struct kobject *kobj, struct attribute *attr,
-                    const char *page, size_t length)
-{
-        elevator_t *e = container_of(kobj, elevator_t, kobj);
-        struct deadline_fs_entry *entry = to_deadline(attr);
-
-        if (!entry->store)
-                return -EIO;
-
-        return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops deadline_sysfs_ops = {
-        .show   = deadline_attr_show,
-        .store  = deadline_attr_store,
-};
-
-static struct kobj_type deadline_ktype = {
-        .sysfs_ops      = &deadline_sysfs_ops,
-        .default_attrs  = default_attrs,
+#define DD_ATTR(name) \
+        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)
+
+static struct elv_fs_entry deadline_attrs[] = {
+        DD_ATTR(read_expire),
+        DD_ATTR(write_expire),
+        DD_ATTR(writes_starved),
+        DD_ATTR(front_merges),
+        DD_ATTR(fifo_batch),
+        __ATTR_NULL
 };
 
 static struct elevator_type iosched_deadline = {
@@ -840,7 +778,7 @@ static struct elevator_type iosched_deadline = {
         .elevator_exit_fn =             deadline_exit_queue,
         },
 
-        .elevator_ktype =       &deadline_ktype,
+        .elevator_attrs = deadline_attrs,
         .elevator_name = "deadline",
         .elevator_owner = THIS_MODULE,
 };
block/elevator.c
169
block/elevator.c
|
@ -120,15 +120,10 @@ static struct elevator_type *elevator_get(const char *name)
|
||||||
return e;
|
return e;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
|
static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
|
||||||
struct elevator_queue *eq)
|
|
||||||
{
|
{
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
memset(eq, 0, sizeof(*eq));
|
|
||||||
eq->ops = &e->ops;
|
|
||||||
eq->elevator_type = e;
|
|
||||||
|
|
||||||
q->elevator = eq;
|
q->elevator = eq;
|
||||||
|
|
||||||
if (eq->ops->elevator_init_fn)
|
if (eq->ops->elevator_init_fn)
|
||||||
|
@ -154,6 +149,32 @@ static int __init elevator_setup(char *str)
|
||||||
|
|
||||||
__setup("elevator=", elevator_setup);
|
__setup("elevator=", elevator_setup);
|
||||||
|
|
||||||
|
static struct kobj_type elv_ktype;
|
||||||
|
|
||||||
|
static elevator_t *elevator_alloc(struct elevator_type *e)
|
||||||
|
{
|
||||||
|
elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
|
||||||
|
if (eq) {
|
||||||
|
memset(eq, 0, sizeof(*eq));
|
||||||
|
eq->ops = &e->ops;
|
||||||
|
eq->elevator_type = e;
|
||||||
|
kobject_init(&eq->kobj);
|
||||||
|
snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
|
||||||
|
eq->kobj.ktype = &elv_ktype;
|
||||||
|
mutex_init(&eq->sysfs_lock);
|
||||||
|
} else {
|
||||||
|
elevator_put(e);
|
||||||
|
}
|
||||||
|
return eq;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void elevator_release(struct kobject *kobj)
|
||||||
|
{
|
||||||
|
elevator_t *e = container_of(kobj, elevator_t, kobj);
|
||||||
|
elevator_put(e->elevator_type);
|
||||||
|
kfree(e);
|
||||||
|
}
|
||||||
|
|
||||||
int elevator_init(request_queue_t *q, char *name)
|
int elevator_init(request_queue_t *q, char *name)
|
||||||
{
|
{
|
||||||
struct elevator_type *e = NULL;
|
struct elevator_type *e = NULL;
|
||||||
|
@ -176,29 +197,26 @@ int elevator_init(request_queue_t *q, char *name)
|
||||||
e = elevator_get("noop");
|
e = elevator_get("noop");
|
||||||
}
|
}
|
||||||
|
|
||||||
eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
|
eq = elevator_alloc(e);
|
||||||
if (!eq) {
|
if (!eq)
|
||||||
elevator_put(e);
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
|
||||||
|
|
||||||
ret = elevator_attach(q, e, eq);
|
ret = elevator_attach(q, eq);
|
||||||
if (ret) {
|
if (ret)
|
||||||
kfree(eq);
|
kobject_put(&eq->kobj);
|
||||||
elevator_put(e);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
void elevator_exit(elevator_t *e)
|
void elevator_exit(elevator_t *e)
|
||||||
{
|
{
|
||||||
|
mutex_lock(&e->sysfs_lock);
|
||||||
if (e->ops->elevator_exit_fn)
|
if (e->ops->elevator_exit_fn)
|
||||||
e->ops->elevator_exit_fn(e);
|
e->ops->elevator_exit_fn(e);
|
||||||
|
e->ops = NULL;
|
||||||
|
mutex_unlock(&e->sysfs_lock);
|
||||||
|
|
||||||
elevator_put(e->elevator_type);
|
kobject_put(&e->kobj);
|
||||||
e->elevator_type = NULL;
|
|
||||||
kfree(e);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -627,26 +645,79 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
|
||||||
|
|
||||||
|
static ssize_t
|
||||||
|
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
|
||||||
|
{
|
||||||
|
elevator_t *e = container_of(kobj, elevator_t, kobj);
|
||||||
|
struct elv_fs_entry *entry = to_elv(attr);
|
||||||
|
ssize_t error;
|
||||||
|
|
||||||
|
if (!entry->show)
|
||||||
|
return -EIO;
|
||||||
|
|
||||||
|
mutex_lock(&e->sysfs_lock);
|
||||||
|
error = e->ops ? entry->show(e, page) : -ENOENT;
|
||||||
|
mutex_unlock(&e->sysfs_lock);
|
||||||
|
return error;
|
||||||
|
}
|
||||||
|
|
||||||
|
static ssize_t
|
||||||
|
elv_attr_store(struct kobject *kobj, struct attribute *attr,
|
||||||
|
const char *page, size_t length)
|
||||||
|
{
|
||||||
|
elevator_t *e = container_of(kobj, elevator_t, kobj);
|
||||||
|
struct elv_fs_entry *entry = to_elv(attr);
|
||||||
|
ssize_t error;
|
||||||
|
|
||||||
|
if (!entry->store)
|
||||||
|
return -EIO;
|
||||||
|
|
||||||
|
mutex_lock(&e->sysfs_lock);
|
||||||
|
error = e->ops ? entry->store(e, page, length) : -ENOENT;
|
||||||
|
mutex_unlock(&e->sysfs_lock);
|
||||||
|
return error;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct sysfs_ops elv_sysfs_ops = {
|
||||||
|
.show = elv_attr_show,
|
||||||
|
.store = elv_attr_store,
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct kobj_type elv_ktype = {
|
||||||
|
.sysfs_ops = &elv_sysfs_ops,
|
||||||
|
.release = elevator_release,
|
||||||
|
};
|
||||||
|
|
||||||
int elv_register_queue(struct request_queue *q)
|
int elv_register_queue(struct request_queue *q)
|
||||||
{
|
{
|
||||||
elevator_t *e = q->elevator;
|
elevator_t *e = q->elevator;
|
||||||
|
int error;
|
||||||
|
|
||||||
e->kobj.parent = kobject_get(&q->kobj);
|
e->kobj.parent = &q->kobj;
|
||||||
if (!e->kobj.parent)
|
|
||||||
return -EBUSY;
|
|
||||||
|
|
||||||
snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
|
error = kobject_add(&e->kobj);
|
||||||
e->kobj.ktype = e->elevator_type->elevator_ktype;
|
if (!error) {
|
||||||
|
struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
|
||||||
return kobject_register(&e->kobj);
|
if (attr) {
|
||||||
|
while (attr->attr.name) {
|
||||||
|
if (sysfs_create_file(&e->kobj, &attr->attr))
|
||||||
|
break;
|
||||||
|
attr++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
kobject_uevent(&e->kobj, KOBJ_ADD);
|
||||||
|
}
|
||||||
|
return error;
|
||||||
}
|
}
|
||||||
|
|
||||||
void elv_unregister_queue(struct request_queue *q)
|
void elv_unregister_queue(struct request_queue *q)
|
||||||
{
|
{
|
||||||
if (q) {
|
if (q) {
|
||||||
elevator_t *e = q->elevator;
|
elevator_t *e = q->elevator;
|
||||||
kobject_unregister(&e->kobj);
|
kobject_uevent(&e->kobj, KOBJ_REMOVE);
|
||||||
kobject_put(&q->kobj);
|
kobject_del(&e->kobj);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -675,21 +746,15 @@ void elv_unregister(struct elevator_type *e)
 	/*
 	 * Iterate every thread in the process to remove the io contexts.
 	 */
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		struct io_context *ioc = p->io_context;
-		if (ioc && ioc->cic) {
-			ioc->cic->exit(ioc->cic);
-			ioc->cic->dtor(ioc->cic);
-			ioc->cic = NULL;
-		}
-		if (ioc && ioc->aic) {
-			ioc->aic->exit(ioc->aic);
-			ioc->aic->dtor(ioc->aic);
-			ioc->aic = NULL;
-		}
-	} while_each_thread(g, p);
-	read_unlock(&tasklist_lock);
+	if (e->ops.trim) {
+		read_lock(&tasklist_lock);
+		do_each_thread(g, p) {
+			task_lock(p);
+			e->ops.trim(p->io_context);
+			task_unlock(p);
+		} while_each_thread(g, p);
+		read_unlock(&tasklist_lock);
+	}
 
 	spin_lock_irq(&elv_list_lock);
 	list_del_init(&e->list);
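The trim hook gives elv_unregister() a way to shoot down only this scheduler's per-process state, instead of blindly freeing both ->cic and ->aic as the removed code did and thereby rogering another iosched's data. A sketch of what a hook looks like under that contract, modeled on the removed lines above (the scheduler is hypothetical; the NULL check is a precaution for tasks that never created an io context):

static void foo_trim(struct io_context *ioc)
{
	/* runs under task_lock(p) from elv_unregister() */
	if (ioc && ioc->cic) {
		ioc->cic->exit(ioc->cic);
		ioc->cic->dtor(ioc->cic);
		ioc->cic = NULL;
	}
}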
@@ -703,16 +768,16 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  * need for the new one. this way we have a chance of going back to the old
  * one, if the new one fails init for some reason.
  */
-static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 {
 	elevator_t *old_elevator, *e;
 
 	/*
 	 * Allocate new elevator
 	 */
-	e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	e = elevator_alloc(new_e);
 	if (!e)
-		goto error;
+		return 0;
 
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data
@@ -743,7 +808,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	/*
 	 * attach and start new elevator
 	 */
-	if (elevator_attach(q, new_e, e))
+	if (elevator_attach(q, e))
 		goto fail;
 
 	if (elv_register_queue(q))
@@ -754,7 +819,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	 */
 	elevator_exit(old_elevator);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	return;
+	return 1;
 
 fail_register:
 	/*
@@ -767,10 +832,9 @@ fail:
 	q->elevator = old_elevator;
 	elv_register_queue(q);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	kfree(e);
-error:
-	elevator_put(new_e);
-	printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
+	if (e)
+		kobject_put(&e->kobj);
+	return 0;
 }
 
 ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
@@ -797,7 +861,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
 		return count;
 	}
 
-	elevator_switch(q, e);
+	if (!elevator_switch(q, e))
+		printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name);
 	return count;
 }
 
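elevator_alloc(), introduced by this series but outside the hunks shown above, is the constructor counterpart of elevator_exit()/elevator_release(): it wires up the type and initialises the embedded kobject so the "iosched" directory's lifetime rules hold. A reconstructed sketch of its shape, not quoted from this page:

static elevator_t *elevator_alloc(struct elevator_type *e)
{
	elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
	if (eq) {
		memset(eq, 0, sizeof(*eq));
		eq->ops = &e->ops;
		eq->elevator_type = e;
		kobject_init(&eq->kobj);
		snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
		eq->kobj.ktype = &elv_ktype;
		mutex_init(&eq->sysfs_lock);
	} else
		elevator_put(e);	/* drop the type reference on failure */
	return eq;
}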

--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue);
  * Hopefully the low level driver will have finished any
  * outstanding requests first...
  **/
-void blk_cleanup_queue(request_queue_t * q)
+static void blk_release_queue(struct kobject *kobj)
 {
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
 	struct request_list *rl = &q->rq;
 
-	if (!atomic_dec_and_test(&q->refcnt))
-		return;
-
-	if (q->elevator)
-		elevator_exit(q->elevator);
-
 	blk_sync_queue(q);
 
 	if (rl->rq_pool)
@@ -1761,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q)
 	kmem_cache_free(requestq_cachep, q);
 }
 
+void blk_put_queue(request_queue_t *q)
+{
+	kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+void blk_cleanup_queue(request_queue_t * q)
+{
+	mutex_lock(&q->sysfs_lock);
+	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	mutex_unlock(&q->sysfs_lock);
+
+	if (q->elevator)
+		elevator_exit(q->elevator);
+
+	blk_put_queue(q);
+}
+
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(request_queue_t *q)
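The old blk_cleanup_queue() was a refcounted free; it is now split in two. blk_put_queue() only drops a kobject reference, while blk_cleanup_queue() marks the queue dead, tears down the elevator, and drops the caller's reference; blk_release_queue() does the actual free whenever the last reference goes away. The driver hunks further down all convert to the same shape; as a sketch (the driver and its structure are hypothetical, the call ordering is the pattern this merge enforces):

static void foo_remove(struct foo_dev *fd)
{
	del_gendisk(fd->disk);
	blk_cleanup_queue(fd->queue);	/* sets QUEUE_FLAG_DEAD, drops our ref */
	put_disk(fd->disk);
	/* the queue itself is freed by blk_release_queue() once any
	 * remaining blk_get_queue() holders call blk_put_queue() */
}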
@@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+static struct kobj_type queue_ktype;
+
 request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	request_queue_t *q;
@@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	memset(q, 0, sizeof(*q));
 	init_timer(&q->unplug_timer);
-	atomic_set(&q->refcnt, 1);
+
+	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+	q->kobj.ktype = &queue_ktype;
+	kobject_init(&q->kobj);
 
 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 	q->backing_dev_info.unplug_io_data = q;
 
+	mutex_init(&q->sysfs_lock);
+
 	return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1854,8 +1874,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 		return NULL;
 
 	q->node = node_id;
-	if (blk_init_free_list(q))
-		goto out_init;
+	if (blk_init_free_list(q)) {
+		kmem_cache_free(requestq_cachep, q);
+		return NULL;
+	}
 
 	/*
 	 * if caller didn't supply a lock, they get per-queue locking with
@@ -1891,9 +1913,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 		return q;
 	}
 
-	blk_cleanup_queue(q);
-out_init:
-	kmem_cache_free(requestq_cachep, q);
+	blk_put_queue(q);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
@@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
 int blk_get_queue(request_queue_t *q)
 {
 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		atomic_inc(&q->refcnt);
+		kobject_get(&q->kobj);
 		return 0;
 	}
 
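blk_get_queue() keeps its 0-on-success convention but now pins the embedded kobject, so every successful call must be balanced by blk_put_queue() rather than the old blk_put_queue-is-blk_cleanup_queue alias removed from blkdev.h below. A usage sketch (caller and helper names are hypothetical):

int foo_borrow_queue(request_queue_t *q)
{
	if (blk_get_queue(q))		/* fails once the queue is marked dead */
		return -ENXIO;
	foo_do_io(q);			/* hypothetical work with the queue pinned */
	blk_put_queue(q);		/* may trigger blk_release_queue() */
	return 0;
}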
@@ -3477,10 +3497,12 @@ void put_io_context(struct io_context *ioc)
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
 	if (atomic_dec_and_test(&ioc->refcount)) {
+		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
 		if (ioc->cic && ioc->cic->dtor)
 			ioc->cic->dtor(ioc->cic);
+		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
 	}
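The rcu_read_lock() around the dtor calls is half of the rmmod race fix: the dtors point into scheduler module text. The other half lives in each scheduler's module exit, which waits for the last outstanding io context before the code can be unloaded, using the ioc_count/ioc_gone bookkeeping this merge adds to the schedulers. Roughly (a sketch for a hypothetical scheduler; the exact barrier choice is approximate):

static void __exit foo_exit(void)
{
	DECLARE_COMPLETION(all_gone);

	elv_unregister(&iosched_foo);
	ioc_gone = &all_gone;
	smp_wmb();	/* publish ioc_gone before sampling the count */
	if (atomic_read(&ioc_count))
		wait_for_completion(ioc_gone);	/* last context free completes us */
	synchronize_rcu();	/* let put_io_context()'s RCU section drain */
}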
@@ -3614,10 +3636,13 @@ static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
 	struct request_list *rl = &q->rq;
+	unsigned long nr;
+	int ret = queue_var_store(&nr, page, count);
+	if (nr < BLKDEV_MIN_RQ)
+		nr = BLKDEV_MIN_RQ;
 
-	int ret = queue_var_store(&q->nr_requests, page, count);
-	if (q->nr_requests < BLKDEV_MIN_RQ)
-		q->nr_requests = BLKDEV_MIN_RQ;
+	spin_lock_irq(q->queue_lock);
+	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
 
 	if (rl->count[READ] >= queue_congestion_on_threshold(q))
@@ -3643,6 +3668,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 		blk_clear_queue_full(q, WRITE);
 		wake_up(&rl->wait[WRITE]);
 	}
+	spin_unlock_irq(q->queue_lock);
 	return ret;
 }
 
@@ -3758,13 +3784,19 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q;
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	ssize_t res;
 
-	q = container_of(kobj, struct request_queue, kobj);
 	if (!entry->show)
 		return -EIO;
-
-	return entry->show(q, page);
+	mutex_lock(&q->sysfs_lock);
+	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+		mutex_unlock(&q->sysfs_lock);
+		return -ENOENT;
+	}
+	res = entry->show(q, page);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
 }
 
 static ssize_t
@@ -3772,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		       const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q;
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+
+	ssize_t res;
 
-	q = container_of(kobj, struct request_queue, kobj);
 	if (!entry->store)
 		return -EIO;
-
-	return entry->store(q, page, length);
+	mutex_lock(&q->sysfs_lock);
+	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+		mutex_unlock(&q->sysfs_lock);
+		return -ENOENT;
+	}
+	res = entry->store(q, page, length);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
 }
 
 static struct sysfs_ops queue_sysfs_ops = {
@@ -3789,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = {
 static struct kobj_type queue_ktype = {
 	.sysfs_ops	= &queue_sysfs_ops,
 	.default_attrs	= default_attrs,
+	.release	= blk_release_queue,
 };
 
 int blk_register_queue(struct gendisk *disk)
@@ -3801,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk)
 		return -ENXIO;
 
 	q->kobj.parent = kobject_get(&disk->kobj);
-	if (!q->kobj.parent)
-		return -EBUSY;
 
-	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
-	q->kobj.ktype = &queue_ktype;
-
-	ret = kobject_register(&q->kobj);
+	ret = kobject_add(&q->kobj);
 	if (ret < 0)
 		return ret;
 
+	kobject_uevent(&q->kobj, KOBJ_ADD);
+
 	ret = elv_register_queue(q);
 	if (ret) {
-		kobject_unregister(&q->kobj);
+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+		kobject_del(&q->kobj);
 		return ret;
 	}
@@ -3827,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk)
 	if (q && q->request_fn) {
 		elv_unregister_queue(q);
 
-		kobject_unregister(&q->kobj);
+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+		kobject_del(&q->kobj);
 		kobject_put(&disk->kobj);
 	}
 }
 
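Open-coding kobject_register()/kobject_unregister() here is deliberate: kobject_unregister() ends with a kobject_put(), which is exactly the reference the queue must now keep until blk_cleanup_queue(). For orientation, roughly what the bundled helpers did in kernels of this era (a reconstructed sketch, not from this page; the real kobject_register() also initialised the kobject):

int kobject_register(struct kobject *kobj)
{
	int error = kobject_add(kobj);
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}

void kobject_unregister(struct kobject *kobj)
{
	kobject_uevent(kobj, KOBJ_REMOVE);
	kobject_del(kobj);
	kobject_put(kobj);	/* the reference the queue code must not lose */
}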

--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1307,7 +1307,7 @@ static int __init loop_init(void)
 
 out_mem4:
 	while (i--)
-		blk_put_queue(loop_dev[i].lo_queue);
+		blk_cleanup_queue(loop_dev[i].lo_queue);
 	devfs_remove("loop");
 	i = max_loop;
 out_mem3:
@@ -1328,7 +1328,7 @@ static void loop_exit(void)
 
 	for (i = 0; i < max_loop; i++) {
 		del_gendisk(disks[i]);
-		blk_put_queue(loop_dev[i].lo_queue);
+		blk_cleanup_queue(loop_dev[i].lo_queue);
 		put_disk(disks[i]);
 	}
 	devfs_remove("loop");

--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2514,7 +2514,7 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
 	return 0;
 
 out_new_dev:
-	blk_put_queue(disk->queue);
+	blk_cleanup_queue(disk->queue);
 out_mem2:
 	put_disk(disk);
 out_mem:
@@ -2555,7 +2555,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
 	DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
 
 	del_gendisk(pd->disk);
-	blk_put_queue(pd->disk->queue);
+	blk_cleanup_queue(pd->disk->queue);
 	put_disk(pd->disk);
 
 	pkt_devs[idx] = NULL;

--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -1131,7 +1131,7 @@ static void mm_pci_remove(struct pci_dev *dev)
 	pci_free_consistent(card->dev, PAGE_SIZE*2,
 			    card->mm_pages[1].desc,
 			    card->mm_pages[1].page_dma);
-	blk_put_queue(card->queue);
+	blk_cleanup_queue(card->queue);
 }
 
 static const struct pci_device_id mm_pci_ids[] = { {

--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,7 +840,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
 bad3:
 	mempool_destroy(md->io_pool);
 bad2:
-	blk_put_queue(md->queue);
+	blk_cleanup_queue(md->queue);
 	free_minor(minor);
 bad1:
 	kfree(md);
@@ -860,7 +860,7 @@ static void free_dev(struct mapped_device *md)
 	del_gendisk(md->disk);
 	free_minor(minor);
 	put_disk(md->disk);
-	blk_put_queue(md->queue);
+	blk_cleanup_queue(md->queue);
 	kfree(md);
 }
 

--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -213,8 +213,11 @@ static void mddev_put(mddev_t *mddev)
 		return;
 	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
 		list_del(&mddev->all_mddevs);
-		blk_put_queue(mddev->queue);
+		/* that blocks */
+		blk_cleanup_queue(mddev->queue);
+		/* that also blocks */
 		kobject_unregister(&mddev->kobj);
+		/* result blows... */
 	}
 	spin_unlock(&all_mddevs_lock);
 }

--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -273,7 +273,7 @@ removeseg:
 	list_del(&dev_info->lh);
 
 	del_gendisk(dev_info->gd);
-	blk_put_queue(dev_info->dcssblk_queue);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 	device_unregister(dev);
@@ -491,7 +491,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 unregister_dev:
 	PRINT_ERR("device_create_file() failed!\n");
 	list_del(&dev_info->lh);
-	blk_put_queue(dev_info->dcssblk_queue);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 	device_unregister(&dev_info->dev);
@@ -505,7 +505,7 @@ list_del:
 unload_seg:
 	segment_unload(local_buf);
 dealloc_gendisk:
-	blk_put_queue(dev_info->dcssblk_queue);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 free_dev_info:
@@ -562,7 +562,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
 	list_del(&dev_info->lh);
 
 	del_gendisk(dev_info->gd);
-	blk_put_queue(dev_info->dcssblk_queue);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 	device_unregister(&dev_info->dev);

--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -58,7 +58,7 @@ struct cfq_io_context {
 	 * circular list of cfq_io_contexts belonging to a process io context
 	 */
 	struct list_head list;
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq[2];
 	void *key;
 
 	struct io_context *ioc;
@@ -69,6 +69,8 @@ struct cfq_io_context {
 	unsigned long ttime_samples;
 	unsigned long ttime_mean;
 
+	struct list_head queue_list;
+
 	void (*dtor)(struct cfq_io_context *);
 	void (*exit)(struct cfq_io_context *);
 };
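Turning ->cfqq into a two-element array is the "keep sync and async cfq_queue separate" patch: a process now gets one cfq_queue per direction, indexed by whether the request counts as synchronous. A sketch of the lookup pattern this implies inside cfq's request setup (the index computation and the cfq_get_queue() signature are approximations of the cfq code of this period, not quotes from it):

	const int is_sync = rq_data_dir(rq) == READ;	/* approximate: cfq also
							   treated PF_SYNCWRITE
							   writers as sync */
	struct cfq_queue *cfqq = cic->cfqq[is_sync];

	if (cfqq == NULL) {
		cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
		cic->cfqq[is_sync] = cfqq;
	}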
@@ -404,8 +406,6 @@ struct request_queue
 
 	struct blk_queue_tag *queue_tags;
 
-	atomic_t refcnt;
-
 	unsigned int nr_sorted;
 	unsigned int in_flight;
 
@@ -424,6 +424,8 @@ struct request_queue
 	struct request pre_flush_rq, bar_rq, post_flush_rq;
 	struct request *orig_bar_rq;
 	unsigned int bi_size;
+
+	struct mutex sysfs_lock;
 };
 
 #define RQ_INACTIVE	(-1)
@@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void);
 int blk_get_queue(request_queue_t *);
 request_queue_t *blk_alloc_queue(gfp_t);
 request_queue_t *blk_alloc_queue_node(gfp_t, int);
-#define blk_put_queue(q) blk_cleanup_queue((q))
+extern void blk_put_queue(request_queue_t *);
 
 /*
  * tag stuff

--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -48,10 +48,17 @@ struct elevator_ops
 
 	elevator_init_fn *elevator_init_fn;
 	elevator_exit_fn *elevator_exit_fn;
+	void (*trim)(struct io_context *);
 };
 
 #define ELV_NAME_MAX	(16)
 
+struct elv_fs_entry {
+	struct attribute attr;
+	ssize_t (*show)(elevator_t *, char *);
+	ssize_t (*store)(elevator_t *, const char *, size_t);
+};
+
 /*
  * identifies an elevator type, such as AS or deadline
  */
@@ -60,7 +67,7 @@ struct elevator_type
 	struct list_head list;
 	struct elevator_ops ops;
 	struct elevator_type *elevator_type;
-	struct kobj_type *elevator_ktype;
+	struct elv_fs_entry *elevator_attrs;
 	char elevator_name[ELV_NAME_MAX];
 	struct module *elevator_owner;
 };
@@ -74,6 +81,7 @@ struct elevator_queue
 	void *elevator_data;
 	struct kobject kobj;
 	struct elevator_type *elevator_type;
+	struct mutex sysfs_lock;
 };
 
 /*

--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -807,8 +807,6 @@ fastcall NORET_TYPE void do_exit(long code)
 		panic("Attempted to kill the idle task!");
 	if (unlikely(tsk->pid == 1))
 		panic("Attempted to kill init!");
-	if (tsk->io_context)
-		exit_io_context();
 
 	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
 		current->ptrace_message = code;
@@ -822,6 +820,8 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(tsk->flags & PF_EXITING)) {
 		printk(KERN_ALERT
 			"Fixing recursive fault but reboot is needed!\n");
+		if (tsk->io_context)
+			exit_io_context();
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule();
 	}
@@ -881,6 +881,9 @@ fastcall NORET_TYPE void do_exit(long code)
 	 */
 	mutex_debug_check_no_locks_held(tsk);
 
+	if (tsk->io_context)
+		exit_io_context();
+
 	/* PF_DEAD causes final put_task_struct after we schedule. */
 	preempt_disable();
 	BUG_ON(tsk->flags & PF_DEAD);
 