ext4: use percpu counter for extent cache count
Use a percpu counter rather than atomic types for shrinker accounting.
There's no need for ultimate accuracy in the shrinker, so this should
come a little more cheaply.  The percpu struct is somewhat large, but
there was a big gap before the cache-aligned s_es_lru_lock anyway, and
it fits nicely in there.

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
commit 1ac6466f25
parent 246307745c
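For context, here is a minimal sketch of the percpu_counter lifecycle the
patch switches to. It is not part of this patch, and the my_cache_* names
are hypothetical, not taken from ext4; only the percpu_counter_* calls are
the real kernel API.

/*
 * Sketch of the percpu_counter pattern: init at setup, inc/dec on
 * hot paths, a cheap approximate read for the shrinker, destroy at
 * teardown.  The my_cache_* names are illustrative only.
 */
#include <linux/percpu_counter.h>

struct my_cache {
	struct percpu_counter nr_items;	/* replaces an atomic_t */
};

static int my_cache_setup(struct my_cache *c)
{
	/* starts at 0; allocates per-CPU slots, so it can fail */
	return percpu_counter_init(&c->nr_items, 0);
}

static void my_cache_add_one(struct my_cache *c)
{
	/* hot path: usually touches only this CPU's local slot */
	percpu_counter_inc(&c->nr_items);
}

static void my_cache_del_one(struct my_cache *c)
{
	percpu_counter_dec(&c->nr_items);
}

static s64 my_cache_estimate(struct my_cache *c)
{
	/*
	 * Cheap, approximate read: skips summing unflushed per-CPU
	 * deltas and clamps transiently negative values to 0.  Good
	 * enough for a shrinker, which only needs a rough count.
	 */
	return percpu_counter_read_positive(&c->nr_items);
}

static void my_cache_teardown(struct my_cache *c)
{
	percpu_counter_destroy(&c->nr_items);
}

The approximate percpu_counter_read_positive() read is what makes the
trade worthwhile: increments and decrements stay CPU-local instead of
bouncing a shared cache line the way an atomic_t does.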
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1268,7 +1268,6 @@ struct ext4_sb_info {
 	atomic_t s_mb_preallocated;
 	atomic_t s_mb_discarded;
 	atomic_t s_lock_busy;
-	atomic_t s_extent_cache_cnt;
 
 	/* locality groups */
 	struct ext4_locality_group __percpu *s_locality_groups;
@@ -1310,6 +1309,7 @@ struct ext4_sb_info {
 	/* Reclaim extents from extent status tree */
 	struct shrinker s_es_shrinker;
 	struct list_head s_es_lru;
+	struct percpu_counter s_extent_cache_cnt;
 	spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
 };
 
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -305,7 +305,7 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
 	 */
 	if (!ext4_es_is_delayed(es)) {
 		EXT4_I(inode)->i_es_lru_nr++;
-		atomic_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+		percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
 	}
 
 	return es;
@@ -317,7 +317,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
 	if (!ext4_es_is_delayed(es)) {
 		BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
 		EXT4_I(inode)->i_es_lru_nr--;
-		atomic_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+		percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
 	}
 
 	kmem_cache_free(ext4_es_cachep, es);
@@ -678,7 +678,7 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 	int nr_to_scan = sc->nr_to_scan;
 	int ret, nr_shrunk = 0;
 
-	ret = atomic_read(&sbi->s_extent_cache_cnt);
+	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
 	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
 
 	if (!nr_to_scan)
@@ -711,7 +711,7 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 	list_splice_tail(&scanned, &sbi->s_es_lru);
 	spin_unlock(&sbi->s_es_lru_lock);
 
-	ret = atomic_read(&sbi->s_extent_cache_cnt);
+	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
 	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
 	return ret;
 }
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -783,6 +783,7 @@ static void ext4_put_super(struct super_block *sb)
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
 	brelse(sbi->s_sbh);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
@@ -3688,6 +3689,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (!err) {
 		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
 	}
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
+	}
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
 		goto failed_mount3;
@@ -3993,6 +3997,7 @@ failed_mount3:
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
 failed_mount2: