From 880121be1179a0db3eb4fdb198108d9475b8be4c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Thu, 15 Apr 2021 13:56:23 +0200
Subject: [PATCH] mm/vmscan: add sync_shrinkers function v3
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

While unplugging a device the TTM shrinker implementation needs a
barrier to make sure that all concurrent shrink operations are done and
no other CPU is referring to a device specific pool any more.

Taking and releasing the shrinker semaphore on the write side after
unmapping and freeing all pages from the device pool should make sure
that no shrinker is running in parallel.

This allows us to avoid the contended mutex in the TTM pool
implementation for every alloc/free operation.

v2: rework the commit message to make clear why we need this
v3: rename the function and add more doc as suggested by Daniel

Signed-off-by: Christian König
Acked-by: Huang Rui
Acked-by: Andrew Morton
Reviewed-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/20210820120528.81114-2-christian.koenig@amd.com
---
 include/linux/shrinker.h |  1 +
 mm/vmscan.c              | 15 +++++++++++++++
 2 files changed, 16 insertions(+)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 9814fff58a69..76fbf92b04d9 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -93,4 +93,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
 extern int register_shrinker(struct shrinker *shrinker);
 extern void unregister_shrinker(struct shrinker *shrinker);
 extern void free_prealloced_shrinker(struct shrinker *shrinker);
+extern void synchronize_shrinkers(void);
 #endif
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4620df62f0ff..5d22a63472f0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -638,6 +638,21 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+/**
+ * synchronize_shrinkers - Wait for all running shrinkers to complete.
+ *
+ * This is equivalent to calling unregister_shrinker() and register_shrinker(),
+ * but atomically and with less overhead. This is useful to guarantee that all
+ * shrinker invocations have seen an update, before freeing memory, similar to
+ * RCU.
+ */
+void synchronize_shrinkers(void)
+{
+	down_write(&shrinker_rwsem);
+	up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(synchronize_shrinkers);
+
 #define SHRINK_BATCH 128
 
 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
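
For context, here is a minimal, hypothetical sketch (not part of the
patch) of how a driver-side pool teardown might use synchronize_shrinkers()
as the barrier described in the commit message. The my_dev_pool structure
and the helper names are invented for illustration; the real TTM pool code
differs and additionally needs its own locking around the page list.

    /* Illustrative only: a device-private page pool whose cached pages
     * may also be reclaimed by a registered shrinker.
     */
    #include <linux/shrinker.h>
    #include <linux/list.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    struct my_dev_pool {
    	struct list_head pages;	/* cached pages, also visible to a shrinker */
    };

    static void my_dev_pool_free_all(struct my_dev_pool *pool)
    {
    	struct page *p, *tmp;

    	list_for_each_entry_safe(p, tmp, &pool->pages, lru) {
    		list_del(&p->lru);
    		__free_page(p);
    	}
    }

    /* Called while unplugging the device. */
    static void my_dev_pool_fini(struct my_dev_pool *pool)
    {
    	/* Unmap and free everything still cached in the pool. */
    	my_dev_pool_free_all(pool);

    	/*
    	 * Barrier: taking and releasing shrinker_rwsem on the write side
    	 * means that once this returns, no shrinker invocation that might
    	 * still be looking at this pool is running on any CPU, so the
    	 * pool structure itself can now be freed safely.
    	 */
    	synchronize_shrinkers();
    }

The point of the design is that the per-invocation cost stays on the
read side of shrinker_rwsem, which shrink_slab() already takes; only the
rare unplug path pays for the write-side barrier, instead of every
alloc/free paying for a contended mutex.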