ext4: fix race in ext4_mb_add_n_trim()
In ext4_mb_add_n_trim(), lg_prealloc_lock should be taken when changing the lg_prealloc_list.

Signed-off-by: Niu Yawei <yawei.niu@intel.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
This commit is contained in:
parent
87e698734b
commit
f116700971
|
@@ -4136,7 +4136,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
|
||||||
/* The max size of hash table is PREALLOC_TB_SIZE */
|
/* The max size of hash table is PREALLOC_TB_SIZE */
|
||||||
order = PREALLOC_TB_SIZE - 1;
|
order = PREALLOC_TB_SIZE - 1;
|
||||||
/* Add the prealloc space to lg */
|
/* Add the prealloc space to lg */
|
||||||
rcu_read_lock();
|
spin_lock(&lg->lg_prealloc_lock);
|
||||||
list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
|
list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
|
||||||
pa_inode_list) {
|
pa_inode_list) {
|
||||||
spin_lock(&tmp_pa->pa_lock);
|
spin_lock(&tmp_pa->pa_lock);
|
||||||
|
@@ -4160,7 +4160,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
|
||||||
if (!added)
|
if (!added)
|
||||||
list_add_tail_rcu(&pa->pa_inode_list,
|
list_add_tail_rcu(&pa->pa_inode_list,
|
||||||
&lg->lg_prealloc_list[order]);
|
&lg->lg_prealloc_list[order]);
|
||||||
rcu_read_unlock();
|
spin_unlock(&lg->lg_prealloc_lock);
|
||||||
|
|
||||||
/* Now trim the list to be not more than 8 elements */
|
/* Now trim the list to be not more than 8 elements */
|
||||||
if (lg_prealloc_count > 8) {
|
if (lg_prealloc_count > 8) {
|
||||||
|
|
Loading…
Reference in New Issue