staging: lustre: libcfs: use a workqueue for rehash work.
lustre has a work-item queuing scheme that provides the same
functionality as linux work_queues.  To make the code easier for
linux devs to follow, change to use work_queues.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 0aa211e398
parent d487fe31f4
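The conversion is mechanical: each embedded cfs_workitem becomes a
work_struct, cfs_wi_init() becomes INIT_WORK(), cfs_wi_schedule()
becomes queue_work() on a dedicated workqueue, and the cancel paths
become cancel_work_sync().  As a reader's aid, here is a minimal
sketch of that target pattern; my_obj, my_worker, my_wq and the
pr_info() payload are illustrative names, not part of the patch:

#include <linux/workqueue.h>

/* An object embeds its work item, as cfs_hash now embeds
 * hs_rehash_work and hs_dep_work.
 */
struct my_obj {
	struct work_struct	wk_work;
	int			wk_data;
};

static struct workqueue_struct *my_wq;

/* The handler receives the work_struct and recovers the containing
 * object with container_of(), as cfs_hash_rehash_worker() does below.
 */
static void my_worker(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, wk_work);

	pr_info("handling %d\n", obj->wk_data);
}

static int my_start(struct my_obj *obj)
{
	/* at most 4 concurrent work items, like cfs_rehash_wq */
	my_wq = alloc_workqueue("my_wq", WQ_SYSFS, 4);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&obj->wk_work, my_worker);	/* was cfs_wi_init() */
	queue_work(my_wq, &obj->wk_work);	/* was cfs_wi_schedule() */
	return 0;
}

static void my_stop(struct my_obj *obj)
{
	/* cancel_work_sync() also waits for a running handler,
	 * replacing cfs_wi_deschedule() plus the hand-rolled wait
	 * loops in the old code.
	 */
	cancel_work_sync(&obj->wk_work);
	destroy_workqueue(my_wq);		/* was cfs_wi_sched_destroy() */
}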
@@ -126,7 +126,7 @@ extern struct miscdevice libcfs_dev;
  */
 extern char lnet_debug_log_upcall[1024];
 
-extern struct cfs_wi_sched *cfs_sched_rehash;
+extern struct workqueue_struct *cfs_rehash_wq;
 
 struct lnet_debugfs_symlink_def {
 	char *name;
@@ -248,7 +248,7 @@ struct cfs_hash {
 	/** # of iterators (caller of cfs_hash_for_each_*) */
 	u32			hs_iterators;
 	/** rehash workitem */
-	struct cfs_workitem	hs_rehash_wi;
+	struct work_struct	hs_rehash_work;
 	/** refcount on this hash table */
 	atomic_t		hs_refcount;
 	/** rehash buckets-table */
@@ -265,7 +265,7 @@ struct cfs_hash {
 	/** bits when we found the max depth */
 	unsigned int		hs_dep_bits;
 	/** workitem to output max depth */
-	struct cfs_workitem	hs_dep_wi;
+	struct work_struct	hs_dep_work;
 #endif
 	/** name of htable */
 	char			hs_name[0];
@@ -738,7 +738,6 @@ u64 cfs_hash_size_get(struct cfs_hash *hs);
  */
-void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
 void cfs_hash_rehash_cancel(struct cfs_hash *hs);
-int cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
+void cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
 			 void *new_key, struct hlist_node *hnode);
 
@@ -114,7 +114,7 @@ module_param(warn_on_depth, uint, 0644);
 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
 #endif
 
-struct cfs_wi_sched *cfs_sched_rehash;
+struct workqueue_struct *cfs_rehash_wq;
 
 static inline void
 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
@@ -519,7 +519,7 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
 	hs->hs_dep_bits = hs->hs_cur_bits;
 	spin_unlock(&hs->hs_dep_lock);
 
-	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
+	queue_work(cfs_rehash_wq, &hs->hs_dep_work);
 # endif
 }
 
@@ -939,12 +939,12 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
  * @flags - CFS_HASH_REHASH enable synamic hash resizing
  *        - CFS_HASH_SORT enable chained hash sort
  */
-static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
+static void cfs_hash_rehash_worker(struct work_struct *work);
 
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(struct cfs_workitem *wi)
+static void cfs_hash_dep_print(struct work_struct *work)
 {
-	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
+	struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
 	int dep;
 	int bkt;
 	int off;
@@ -968,21 +968,12 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi)
 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
 {
 	spin_lock_init(&hs->hs_dep_lock);
-	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
+	INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
 }
 
 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 {
-	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
-		return;
-
-	spin_lock(&hs->hs_dep_lock);
-	while (hs->hs_dep_bits) {
-		spin_unlock(&hs->hs_dep_lock);
-		cond_resched();
-		spin_lock(&hs->hs_dep_lock);
-	}
-	spin_unlock(&hs->hs_dep_lock);
+	cancel_work_sync(&hs->hs_dep_work);
 }
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
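(The hand-rolled cancel path above collapses to a single call because
cancel_work_sync() guarantees that, on return, the work item is
neither queued nor running; the old code had to poll hs_dep_bits
under hs_dep_lock to obtain the same guarantee.)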
@@ -1044,7 +1035,7 @@ cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
 	hs->hs_ops = ops;
 	hs->hs_extra_bytes = extra_bytes;
 	hs->hs_rehash_bits = 0;
-	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
+	INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
 	cfs_hash_depth_wi_init(hs);
 
 	if (cfs_hash_with_rehash(hs))
@@ -1364,6 +1355,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
 
 	cfs_hash_lock(hs, 1);
 	hs->hs_iterators++;
+	cfs_hash_unlock(hs, 1);
 
 	/* NB: iteration is mostly called by service thread,
 	 * we tend to cancel pending rehash-request, instead of
@@ -1371,8 +1363,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
 	 * after iteration
 	 */
 	if (cfs_hash_is_rehashing(hs))
-		cfs_hash_rehash_cancel_locked(hs);
-	cfs_hash_unlock(hs, 1);
+		cfs_hash_rehash_cancel(hs);
 }
 
 static void
@@ -1774,42 +1765,13 @@ EXPORT_SYMBOL(cfs_hash_for_each_key);
  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
  */
 void
-cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
+cfs_hash_rehash_cancel(struct cfs_hash *hs)
 {
-	int i;
-
-	/* need hold cfs_hash_lock(hs, 1) */
-	LASSERT(cfs_hash_with_rehash(hs) &&
-		!cfs_hash_with_no_lock(hs));
-
-	if (!cfs_hash_is_rehashing(hs))
-		return;
-
-	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
-		hs->hs_rehash_bits = 0;
-		return;
-	}
-
-	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
-		cfs_hash_unlock(hs, 1);
-		/* raise console warning while waiting too long */
-		CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
-		       "hash %s is still rehashing, rescheded %d\n",
-		       hs->hs_name, i - 1);
-		cond_resched();
-		cfs_hash_lock(hs, 1);
-	}
+	LASSERT(cfs_hash_with_rehash(hs));
+	cancel_work_sync(&hs->hs_rehash_work);
 }
 
 void
-cfs_hash_rehash_cancel(struct cfs_hash *hs)
-{
-	cfs_hash_lock(hs, 1);
-	cfs_hash_rehash_cancel_locked(hs);
-	cfs_hash_unlock(hs, 1);
-}
-
-int
 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 {
 	int rc;
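(This also removes the need for a separate *_locked variant:
cfs_hash_rehash_cancel_locked() existed so the cancel could be issued
while holding cfs_hash_lock(hs, 1), with the polling loop dropping
and retaking the lock.  cancel_work_sync() may sleep and is now
called without the hash lock, as in cfs_hash_for_each_enter() above,
so the locked variant and its console-warning loop go away.)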
@@ -1821,21 +1783,21 @@ cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 	rc = cfs_hash_rehash_bits(hs);
 	if (rc <= 0) {
 		cfs_hash_unlock(hs, 1);
-		return rc;
+		return;
 	}
 
 	hs->hs_rehash_bits = rc;
 	if (!do_rehash) {
 		/* launch and return */
-		cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
+		queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
 		cfs_hash_unlock(hs, 1);
-		return 0;
+		return;
 	}
 
 	/* rehash right now */
 	cfs_hash_unlock(hs, 1);
 
-	return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
+	cfs_hash_rehash_worker(&hs->hs_rehash_work);
 }
 
 static int
@@ -1869,10 +1831,10 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
 	return c;
 }
 
-static int
-cfs_hash_rehash_worker(struct cfs_workitem *wi)
+static void
+cfs_hash_rehash_worker(struct work_struct *work)
 {
-	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
+	struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_rehash_work);
 	struct cfs_hash_bucket **bkts;
 	struct cfs_hash_bd bd;
 	unsigned int old_size;
@@ -1956,8 +1918,6 @@ cfs_hash_rehash_worker(struct cfs_workitem *wi)
 		hs->hs_cur_bits = hs->hs_rehash_bits;
 out:
 	hs->hs_rehash_bits = 0;
-	if (rc == -ESRCH) /* never be scheduled again */
-		cfs_wi_exit(cfs_sched_rehash, wi);
 	bsize = cfs_hash_bkt_size(hs);
 	cfs_hash_unlock(hs, 1);
 	/* can't refer to @hs anymore because it could be destroyed */
@@ -1965,8 +1925,6 @@ out:
 		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
 	if (rc)
 		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
-	/* return 1 only if cfs_wi_exit is called */
-	return rc == -ESRCH;
 }
 
 /**
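(The workitem API used the worker's return value as a protocol:
returning 1, here rc == -ESRCH, told the scheduler that cfs_wi_exit()
had been called and the item must not be touched again.  A
work_struct handler returns void; that bookkeeping is subsumed by
cancel_work_sync() and destroy_workqueue(), so the two hunks above
simply delete it.)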
@@ -554,12 +554,10 @@ static int libcfs_init(void)
 		goto cleanup_deregister;
 	}
 
-	/* max to 4 threads, should be enough for rehash */
-	rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
-	rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
-				 rc, &cfs_sched_rehash);
-	if (rc) {
-		CERROR("Startup workitem scheduler: error: %d\n", rc);
+	cfs_rehash_wq = alloc_workqueue("cfs_rh", WQ_SYSFS, 4);
+	if (!cfs_rehash_wq) {
+		CERROR("Failed to start rehash workqueue.\n");
+		rc = -ENOMEM;
 		goto cleanup_deregister;
 	}
 
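(In alloc_workqueue("cfs_rh", WQ_SYSFS, 4) the third argument is
max_active: at most four work items from this queue run concurrently,
preserving the old scheduler's "max to 4 threads" sizing without
dedicating threads up front.  WQ_SYSFS additionally exposes the
queue's attributes under /sys/devices/virtual/workqueue/.)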
@@ -590,9 +588,9 @@ static void libcfs_exit(void)
 
 	lustre_remove_debugfs();
 
-	if (cfs_sched_rehash) {
-		cfs_wi_sched_destroy(cfs_sched_rehash);
-		cfs_sched_rehash = NULL;
+	if (cfs_rehash_wq) {
+		destroy_workqueue(cfs_rehash_wq);
+		cfs_rehash_wq = NULL;
 	}
 
 	cfs_crypto_unregister();