block cfq: don't use atomic_t for cfq_group
cfq_group->ref is used with queue_lock held; the only exception is cfq_set_request, which looks like a bug to me. So ref doesn't need to be an atomic_t, and atomic operations are slower.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 30d7b9448f
commit 329a67815b
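For context, the idea the patch relies on is that every update of cfq_group->ref already happens with queue_lock held, so a plain int increment/decrement is safe and cheaper than an atomic read-modify-write. Below is a minimal userspace sketch of that pattern; demo_group, demo_get, demo_put and demo_queue_lock are hypothetical names, and a pthread mutex stands in for the kernel's queue_lock, so this only illustrates the locking assumption and is not the kernel code itself.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for cfq_group: the refcount is a plain int
 * because it is only ever touched while the owner's lock is held. */
struct demo_group {
	int ref;
};

/* Stands in for queue_lock, which protects every ref update. */
static pthread_mutex_t demo_queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Must be called with demo_queue_lock held (like cfq_ref_get_cfqg()
 * under queue_lock): a plain increment is enough, no atomic_inc(). */
static struct demo_group *demo_get(struct demo_group *grp)
{
	grp->ref++;
	return grp;
}

/* Must be called with demo_queue_lock held; mirrors the reshaped put
 * path: decrement, test, free on the last reference. */
static void demo_put(struct demo_group *grp)
{
	grp->ref--;
	if (grp->ref)
		return;
	free(grp);
}

int main(void)
{
	struct demo_group *grp = calloc(1, sizeof(*grp));

	if (!grp)
		return 1;

	pthread_mutex_lock(&demo_queue_lock);
	grp->ref = 1;		/* initial reference taken at allocation */
	demo_get(grp);		/* second holder */
	demo_put(grp);		/* drop it: ref back to 1, no free */
	demo_put(grp);		/* last reference: group is freed */
	pthread_mutex_unlock(&demo_queue_lock);

	printf("done\n");
	return 0;
}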
@@ -207,7 +207,7 @@ struct cfq_group {
 	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
-	atomic_t ref;
+	int ref;
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -1014,7 +1014,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 * elevator which will be dropped by either elevator exit
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
@@ -1059,7 +1059,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-	atomic_inc(&cfqg->ref);
+	cfqg->ref++;
 	return cfqg;
 }
 
@@ -1071,7 +1071,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
 	cfqq->cfqg = cfqg;
 	/* cfqq reference on cfqg */
-	atomic_inc(&cfqq->cfqg->ref);
+	cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1079,8 +1079,9 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 	struct cfq_rb_root *st;
 	int i, j;
 
-	BUG_ON(atomic_read(&cfqg->ref) <= 0);
-	if (!atomic_dec_and_test(&cfqg->ref))
+	BUG_ON(cfqg->ref <= 0);
+	cfqg->ref--;
+	if (cfqg->ref)
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
@@ -1188,7 +1189,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 		cfqq->orig_cfqg = cfqq->cfqg;
 		cfqq->cfqg = &cfqd->root_group;
-		atomic_inc(&cfqd->root_group.ref);
+		cfqd->root_group.ref++;
 		group_changed = 1;
 	} else if (!cfqd->cfq_group_isolation
 		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -3681,12 +3682,12 @@ new_queue:
 
 	cfqq->allocated[rw]++;
 	cfqq->ref++;
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	rq->elevator_private = cic;
 	rq->elevator_private2 = cfqq;
 	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	return 0;
 
 queue_fail:
@@ -3884,7 +3885,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * Take a reference to root group which we never drop. This is just
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 	rcu_read_lock();
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);