mm: numa: limit scope of lock for NUMA migrate rate limiting
NUMA migrate rate limiting protects a migration counter and window using a
lock but in some cases this can be a contended lock.  It is not critical
that the number of pages be perfect, lost updates are acceptable.  Reduce
the importance of this lock.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 1c5e9c27cb (parent 1c30e0177e)
include/linux/mmzone.h
@@ -764,10 +764,7 @@ typedef struct pglist_data {
 	int kswapd_max_order;
 	enum zone_type classzone_idx;
 #ifdef CONFIG_NUMA_BALANCING
-	/*
-	 * Lock serializing the per destination node AutoNUMA memory
-	 * migration rate limiting data.
-	 */
+	/* Lock serializing the migrate rate limiting window */
 	spinlock_t numabalancing_migrate_lock;
 
 	/* Rate limiting time interval */
mm/migrate.c | 21
@@ -1602,26 +1602,29 @@ bool migrate_ratelimited(int node)
 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
 					unsigned long nr_pages)
 {
-	bool rate_limited = false;
-
 	/*
 	 * Rate-limit the amount of data that is being migrated to a node.
 	 * Optimal placement is no good if the memory bus is saturated and
 	 * all the time is being spent migrating!
 	 */
-	spin_lock(&pgdat->numabalancing_migrate_lock);
 	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+		spin_lock(&pgdat->numabalancing_migrate_lock);
 		pgdat->numabalancing_migrate_nr_pages = 0;
 		pgdat->numabalancing_migrate_next_window = jiffies +
 			msecs_to_jiffies(migrate_interval_millisecs);
+		spin_unlock(&pgdat->numabalancing_migrate_lock);
 	}
 	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
-		rate_limited = true;
-	else
-		pgdat->numabalancing_migrate_nr_pages += nr_pages;
-	spin_unlock(&pgdat->numabalancing_migrate_lock);
-
-	return rate_limited;
+		return true;
+
+	/*
+	 * This is an unlocked non-atomic update so errors are possible.
+	 * The consequences are failing to migrate when we potentiall should
+	 * have which is not severe enough to warrant locking. If it is ever
+	 * a problem, it can be converted to a per-cpu counter.
+	 */
+	pgdat->numabalancing_migrate_nr_pages += nr_pages;
+	return false;
 }
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
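
For context, the change keeps the spinlock only around the rate-limit window reset and lets the per-window page counter be updated without it, accepting occasional lost updates. Below is a minimal, stand-alone user-space sketch of that pattern, not kernel code: the struct, the constants (RATELIMIT_PAGES, RATELIMIT_WINDOW_MS) and the helper names (ratelimit_charge, now_ms) are illustrative assumptions, with a pthread mutex and a monotonic clock standing in for the kernel's spinlock and jiffies.

/*
 * Sketch of reduced lock scope for rate limiting (assumed names, not from
 * the kernel tree): lock only the window reset, tolerate lossy counting.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RATELIMIT_PAGES      1024UL   /* max pages per window (illustrative) */
#define RATELIMIT_WINDOW_MS  100UL    /* window length in milliseconds */

struct ratelimit {
	pthread_mutex_t lock;          /* serializes window resets only */
	unsigned long next_window_ms;  /* absolute time the window expires */
	unsigned long nr_pages;        /* pages charged in current window */
};

static unsigned long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/* Returns true if the caller should be rate limited. */
static bool ratelimit_charge(struct ratelimit *rl, unsigned long nr_pages)
{
	/* Take the lock only when the window has actually expired. */
	if (now_ms() > rl->next_window_ms) {
		pthread_mutex_lock(&rl->lock);
		rl->nr_pages = 0;
		rl->next_window_ms = now_ms() + RATELIMIT_WINDOW_MS;
		pthread_mutex_unlock(&rl->lock);
	}

	if (rl->nr_pages > RATELIMIT_PAGES)
		return true;

	/*
	 * Unlocked, non-atomic update: concurrent callers may lose counts,
	 * which only makes the limiter slightly too permissive or strict.
	 */
	rl->nr_pages += nr_pages;
	return false;
}

int main(void)
{
	static struct ratelimit rl = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.next_window_ms = 0,
		.nr_pages = 0,
	};

	for (int i = 0; i < 5; i++)
		printf("charge 512 pages: limited=%d\n",
		       ratelimit_charge(&rl, 512));

	return 0;
}

Build with something like cc -pthread sketch.c. As in the patch, the worst a racy increment can do is mis-count a few pages within one window, which the commit message deems an acceptable trade for avoiding a contended lock on every charge.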