hv_balloon: Update the balloon driver to use the SBRM API
This patch is intended as a proof-of-concept for the new SBRM machinery[1].
For some brief background, the idea behind SBRM is to use the __cleanup__
attribute to automatically unlock locks (or otherwise release resources)
when they go out of scope, similar to C++-style RAII. This promises some
benefits, such as simpler code (particularly where you have lots of
"goto fail;"-type constructs) and a reduced surface area for certain kinds
of bugs.

The changes in this patch should not result in any difference in how the
code actually runs (i.e., it's purely an exercise in this new syntactic
sugar). In one instance SBRM was not appropriate, so I left that part
alone, but all other locking/unlocking is handled automatically in this
patch.

[1] https://lore.kernel.org/all/20230626125726.GU4253@hirez.programming.kicks-ass.net/

Suggested-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: "Mitchell Levy (Microsoft)" <levymitchell0@gmail.com>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Link: https://lore.kernel.org/r/20230807-sbrm-hyperv-v2-1-9d2ac15305bd@gmail.com
commit 4f74fb30ea
parent 5d0c230f1d
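For readers unfamiliar with the mechanism, the sketch below shows what
__cleanup__-based scope-exit handling looks like in plain userspace C. It
is not part of the patch: the pthread mutex and the unlock_cleanup() and
increment() helpers are illustrative stand-ins, not the kernel's guard
types from <linux/cleanup.h>, but the compiler behavior they demonstrate
(the cleanup function runs on every scope exit, including early returns)
is exactly what guard() and scoped_guard() build on.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* Cleanup callback: the compiler passes a pointer to the guarded variable. */
static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

static int increment(int bail_early)
{
	/* Taken here, released automatically on every scope exit. */
	__attribute__((cleanup(unlock_cleanup))) pthread_mutex_t *guard = &lock;

	pthread_mutex_lock(guard);

	if (bail_early)
		return -1;	/* no "goto unlock" needed; cleanup runs here */

	counter++;
	return 0;		/* ...and here */
}

int main(void)
{
	increment(0);
	increment(1);
	printf("counter = %d\n", counter);	/* prints "counter = 1" */
	return 0;
}

Compiled with "gcc -pthread demo.c", this prints "counter = 1": the first
call increments under the lock, the second bails out early, and in both
cases the mutex is released without an explicit unlock on each return
path. That is the same property the guard() and scoped_guard() conversions
in the diff below rely on.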
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -8,6 +8,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cleanup.h>
 #include <linux/kernel.h>
 #include <linux/jiffies.h>
 #include <linux/mman.h>
@@ -646,7 +647,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
 			      void *v)
 {
 	struct memory_notify *mem = (struct memory_notify *)v;
-	unsigned long flags, pfn_count;
+	unsigned long pfn_count;
 
 	switch (val) {
 	case MEM_ONLINE:
@@ -655,21 +656,22 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
 		break;
 
 	case MEM_OFFLINE:
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		pfn_count = hv_page_offline_check(mem->start_pfn,
-						  mem->nr_pages);
-		if (pfn_count <= dm_device.num_pages_onlined) {
-			dm_device.num_pages_onlined -= pfn_count;
-		} else {
-			/*
-			 * We're offlining more pages than we managed to online.
-			 * This is unexpected. In any case don't let
-			 * num_pages_onlined wrap around zero.
-			 */
-			WARN_ON_ONCE(1);
-			dm_device.num_pages_onlined = 0;
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			pfn_count = hv_page_offline_check(mem->start_pfn,
+							  mem->nr_pages);
+			if (pfn_count <= dm_device.num_pages_onlined) {
+				dm_device.num_pages_onlined -= pfn_count;
+			} else {
+				/*
+				 * We're offlining more pages than we
+				 * managed to online. This is
+				 * unexpected. In any case don't let
+				 * num_pages_onlined wrap around zero.
+				 */
+				WARN_ON_ONCE(1);
+				dm_device.num_pages_onlined = 0;
+			}
 		}
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 		break;
 	case MEM_GOING_ONLINE:
 	case MEM_GOING_OFFLINE:
@@ -721,25 +723,24 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 	unsigned long start_pfn;
 	unsigned long processed_pfn;
 	unsigned long total_pfn = pfn_count;
-	unsigned long flags;
 
 	for (i = 0; i < (size/HA_CHUNK); i++) {
 		start_pfn = start + (i * HA_CHUNK);
 
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		has->ha_end_pfn += HA_CHUNK;
-
-		if (total_pfn > HA_CHUNK) {
-			processed_pfn = HA_CHUNK;
-			total_pfn -= HA_CHUNK;
-		} else {
-			processed_pfn = total_pfn;
-			total_pfn = 0;
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			has->ha_end_pfn += HA_CHUNK;
+
+			if (total_pfn > HA_CHUNK) {
+				processed_pfn = HA_CHUNK;
+				total_pfn -= HA_CHUNK;
+			} else {
+				processed_pfn = total_pfn;
+				total_pfn = 0;
+			}
+
+			has->covered_end_pfn += processed_pfn;
 		}
 
-		has->covered_end_pfn += processed_pfn;
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
-
 		reinit_completion(&dm_device.ol_waitevent);
 
 		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
@@ -758,10 +759,10 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 			 */
 			do_hot_add = false;
 		}
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		has->ha_end_pfn -= HA_CHUNK;
-		has->covered_end_pfn -= processed_pfn;
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			has->ha_end_pfn -= HA_CHUNK;
+			has->covered_end_pfn -= processed_pfn;
+		}
 		break;
 	}
 
@@ -781,10 +782,9 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 static void hv_online_page(struct page *pg, unsigned int order)
 {
 	struct hv_hotadd_state *has;
-	unsigned long flags;
 	unsigned long pfn = page_to_pfn(pg);
 
-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
 		/* The page belongs to a different HAS. */
 		if ((pfn < has->start_pfn) ||
@@ -794,7 +794,6 @@ static void hv_online_page(struct page *pg, unsigned int order)
 		hv_bring_pgs_online(has, pfn, 1UL << order);
 		break;
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 }
 
 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -803,9 +802,8 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 	struct hv_hotadd_gap *gap;
 	unsigned long residual, new_inc;
 	int ret = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
 		/*
 		 * If the pfn range we are dealing with is not in the current
@@ -852,7 +850,6 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 		ret = 1;
 		break;
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 
 	return ret;
 }
@@ -947,7 +944,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
 {
 	struct hv_hotadd_state *ha_region = NULL;
 	int covered;
-	unsigned long flags;
 
 	if (pfn_cnt == 0)
 		return 0;
@@ -979,9 +975,9 @@ static unsigned long process_hot_add(unsigned long pg_start,
 		ha_region->covered_end_pfn = pg_start;
 		ha_region->end_pfn = rg_start + rg_size;
 
-		spin_lock_irqsave(&dm_device.ha_lock, flags);
-		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
-		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+			list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+		}
 	}
 
 do_pg_range:
@@ -2047,7 +2043,6 @@ static void balloon_remove(struct hv_device *dev)
 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
 	struct hv_hotadd_state *has, *tmp;
 	struct hv_hotadd_gap *gap, *tmp_gap;
-	unsigned long flags;
 
 	if (dm->num_pages_ballooned != 0)
 		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -2073,7 +2068,7 @@ static void balloon_remove(struct hv_device *dev)
 #endif
 	}
 
-	spin_lock_irqsave(&dm_device.ha_lock, flags);
+	guard(spinlock_irqsave)(&dm_device.ha_lock);
 	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
 		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
 			list_del(&gap->list);
@@ -2082,7 +2077,6 @@ static void balloon_remove(struct hv_device *dev)
 		list_del(&has->list);
 		kfree(has);
 	}
-	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
 }
 
 static int balloon_suspend(struct hv_device *hv_dev)