staging: lustre: obdclass: race lustre_profile_list
Running multiple mounts at the same time results in lustre_profile_list corruption when adding a new profile. This patch adds a new spin_lock to protect the list and avoid the bug.

Signed-off-by: Hiroya Nozaki <nozaki.hiroya@jp.fujitsu.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6600
Reviewed-on: http://review.whamcloud.com/14896
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Jian Yu <jian.yu@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b44c295dba
commit f65053dff9
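The pattern the patch introduces: the profile list owns one reference to each entry, class_get_profile() takes an extra reference under a spinlock, and the memory is freed only in class_put_profile() once the last reference is dropped and the entry has already been unlinked. The sketch below is a minimal userspace analogue of that pattern, not the kernel code: it assumes a pthread mutex in place of the spinlock and a hand-rolled singly linked list in place of list_head, and the names profile_get/profile_put/profile_add/profile_del are illustrative, not the kernel API.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct profile {
        struct profile *next;
        char *name;
        int refs;               /* protected by profile_lock */
        bool list_deleted;      /* set once the entry is unlinked */
};

static struct profile *profile_list;
static pthread_mutex_t profile_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lookup takes a reference under the lock, like class_get_profile(). */
static struct profile *profile_get(const char *name)
{
        struct profile *p;
        pthread_mutex_lock(&profile_lock);
        for (p = profile_list; p; p = p->next) {
                if (!strcmp(p->name, name)) {
                        p->refs++;      /* caller now holds a reference */
                        pthread_mutex_unlock(&profile_lock);
                        return p;
                }
        }
        pthread_mutex_unlock(&profile_lock);
        return NULL;
}

/* Drop a reference; the last put frees, like class_put_profile(). */
static void profile_put(struct profile *p)
{
        pthread_mutex_lock(&profile_lock);
        if (--p->refs > 0) {
                pthread_mutex_unlock(&profile_lock);
                return;
        }
        pthread_mutex_unlock(&profile_lock);

        assert(p->list_deleted);        /* must already be off the list */
        free(p->name);
        free(p);
}

/* Insert with an initial reference owned by the list itself. */
static int profile_add(const char *name)
{
        struct profile *p = calloc(1, sizeof(*p));
        if (!p)
                return -1;
        p->name = strdup(name);
        if (!p->name) {
                free(p);
                return -1;
        }
        pthread_mutex_lock(&profile_lock);
        p->refs = 1;            /* the list holds one reference */
        p->list_deleted = false;
        p->next = profile_list;
        profile_list = p;
        pthread_mutex_unlock(&profile_lock);
        return 0;
}

/* Unlink under the lock, then drop the list's reference. */
static void profile_del(const char *name)
{
        struct profile *p = profile_get(name);
        struct profile **pp;
        if (!p)
                return;
        pthread_mutex_lock(&profile_lock);
        p->refs--;              /* undo the reference profile_get() took */
        for (pp = &profile_list; *pp; pp = &(*pp)->next) {
                if (*pp == p) {
                        *pp = p->next;  /* unlink */
                        break;
                }
        }
        p->list_deleted = true;
        pthread_mutex_unlock(&profile_lock);
        profile_put(p);         /* frees once the last user is done */
}

int main(void)
{
        profile_add("lustre-client");
        struct profile *p = profile_get("lustre-client");
        profile_del("lustre-client");   /* safe even while p is still held */
        if (p)
                profile_put(p);         /* last put frees the entry */
        return 0;
}

The design point mirrored from the patch: every lookup and list mutation takes the same lock, so concurrent mounts cannot corrupt the list, and an entry found by a concurrent reader cannot be freed under it because freeing happens only in the put path after the reference count reaches zero.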
@@ -176,10 +176,13 @@ struct lustre_profile {
         char *lp_profile;
         char *lp_dt;
         char *lp_md;
+        int lp_refs;
+        bool lp_list_deleted;
 };

 struct lustre_profile *class_get_profile(const char *prof);
 void class_del_profile(const char *prof);
+void class_put_profile(struct lustre_profile *lprof);
 void class_del_profiles(void);

 #if LUSTRE_TRACKS_LOCK_EXP_REFS
@@ -929,6 +929,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
 out_free:
         kfree(md);
         kfree(dt);
+        if (lprof)
+                class_put_profile(lprof);
         if (err)
                 ll_put_super(sb);
         else if (sbi->ll_flags & LL_SBI_VERBOSE)
@@ -585,16 +585,21 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
 }

 static LIST_HEAD(lustre_profile_list);
+static DEFINE_SPINLOCK(lustre_profile_list_lock);

 struct lustre_profile *class_get_profile(const char *prof)
 {
         struct lustre_profile *lprof;

+        spin_lock(&lustre_profile_list_lock);
         list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
                 if (!strcmp(lprof->lp_profile, prof)) {
+                        lprof->lp_refs++;
+                        spin_unlock(&lustre_profile_list_lock);
                         return lprof;
                 }
         }
+        spin_unlock(&lustre_profile_list_lock);
         return NULL;
 }
 EXPORT_SYMBOL(class_get_profile);
@@ -639,7 +644,11 @@ static int class_add_profile(int proflen, char *prof, int osclen, char *osc,
                 }
         }

+        spin_lock(&lustre_profile_list_lock);
+        lprof->lp_refs = 1;
+        lprof->lp_list_deleted = false;
         list_add(&lprof->lp_list, &lustre_profile_list);
+        spin_unlock(&lustre_profile_list_lock);
         return err;

 free_lp_dt:
@@ -659,27 +668,59 @@ void class_del_profile(const char *prof)

         lprof = class_get_profile(prof);
         if (lprof) {
+                spin_lock(&lustre_profile_list_lock);
+                /* because get profile increments the ref counter */
+                lprof->lp_refs--;
                 list_del(&lprof->lp_list);
-                kfree(lprof->lp_profile);
-                kfree(lprof->lp_dt);
-                kfree(lprof->lp_md);
-                kfree(lprof);
+                lprof->lp_list_deleted = true;
+                spin_unlock(&lustre_profile_list_lock);
+
+                class_put_profile(lprof);
         }
 }
 EXPORT_SYMBOL(class_del_profile);

+void class_put_profile(struct lustre_profile *lprof)
+{
+        spin_lock(&lustre_profile_list_lock);
+        if (--lprof->lp_refs > 0) {
+                LASSERT(lprof->lp_refs > 0);
+                spin_unlock(&lustre_profile_list_lock);
+                return;
+        }
+        spin_unlock(&lustre_profile_list_lock);
+
+        /* confirm not a negative number */
+        LASSERT(!lprof->lp_refs);
+
+        /*
+         * At least one class_del_profile/profiles must be called
+         * on the target profile or lustre_profile_list will corrupt
+         */
+        LASSERT(lprof->lp_list_deleted);
+        kfree(lprof->lp_profile);
+        kfree(lprof->lp_dt);
+        kfree(lprof->lp_md);
+        kfree(lprof);
+}
+EXPORT_SYMBOL(class_put_profile);
+
 /* COMPAT_146 */
 void class_del_profiles(void)
 {
         struct lustre_profile *lprof, *n;

+        spin_lock(&lustre_profile_list_lock);
         list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
                 list_del(&lprof->lp_list);
-                kfree(lprof->lp_profile);
-                kfree(lprof->lp_dt);
-                kfree(lprof->lp_md);
-                kfree(lprof);
+                lprof->lp_list_deleted = true;
+                spin_unlock(&lustre_profile_list_lock);
+
+                class_put_profile(lprof);
+
+                spin_lock(&lustre_profile_list_lock);
         }
+        spin_unlock(&lustre_profile_list_lock);
 }
 EXPORT_SYMBOL(class_del_profiles);