xen/balloon: Mark pages PG_offline in balloon_append()
Let's move the __SetPageOffline() call which all callers perform into
balloon_append().

In decrease_reservation(), pages are now marked PG_offline a little
later than before; however, this should not matter for XEN.

Suggested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
This commit is contained in:
parent
dde3285ffa
commit
59b52f105f
|
@@ -158,6 +158,8 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 /* balloon_append: add the given page to the balloon. */
 static void balloon_append(struct page *page)
 {
+	__SetPageOffline(page);
+
 	/* Lowmem is re-populated first, so highmem pages go at list tail. */
 	if (PageHighMem(page)) {
 		list_add_tail(&page->lru, &ballooned_pages);
@@ -372,7 +374,6 @@ static void xen_online_page(struct page *page, unsigned int order)
 	for (i = 0; i < size; i++) {
 		p = pfn_to_page(start_pfn + i);
 		__online_page_set_limits(p);
-		__SetPageOffline(p);
 		balloon_append(p);
 	}
 	mutex_unlock(&balloon_mutex);
@@ -466,7 +467,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			state = BP_EAGAIN;
 			break;
 		}
-		__SetPageOffline(page);
 		adjust_managed_page_count(page, -1);
 		xenmem_reservation_scrub_page(page);
 		list_add(&page->lru, &pages);
@@ -648,11 +648,9 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 	mutex_lock(&balloon_mutex);
 
 	for (i = 0; i < nr_pages; i++) {
-		if (pages[i]) {
-			__SetPageOffline(pages[i]);
+		if (pages[i])
 			balloon_append(pages[i]);
-		}
 	}
 
 	balloon_stats.target_unpopulated -= nr_pages;
 
@@ -669,7 +667,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
 				      unsigned long pages)
 {
 	unsigned long pfn, extra_pfn_end;
-	struct page *page;
 
 	/*
 	 * If the amount of usable memory has been limited (e.g., with
@@ -679,12 +676,10 @@ static void __init balloon_add_region(unsigned long start_pfn,
 	extra_pfn_end = min(max_pfn, start_pfn + pages);
 
 	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-		page = pfn_to_page(pfn);
 		/* totalram_pages and totalhigh_pages do not
 		   include the boot-time balloon extension, so
 		   don't subtract from it. */
-		__SetPageOffline(page);
-		balloon_append(page);
+		balloon_append(pfn_to_page(pfn));
 	}
 
 	balloon_stats.total_pages += extra_pfn_end - start_pfn;
|
Loading…
Reference in New Issue