VMware balloon: stop locking pages when hypervisor tells us enough

When the hypervisor decides to decrease the target balloon size while the
balloon driver is trying to lock pages, it may respond with
VMW_BALLOON_ERROR_PPN_NOTNEEDED.  Use this status to stop reserving pages
immediately and wait for the next update cycle to fetch the new target,
instead of continuing to try to lock pages until the refused list grows
above VMW_BALLOON_MAX_REFUSED (16) pages.

As a result the driver stops bothering the hypervisor with attempts to
lock pages that are no longer needed.  Most likely the hypervisor's next
order will be to reduce the balloon size anyway.

It is a small optimization.
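For illustration, here is a minimal stand-alone sketch of the new control
flow; the loop mirrors the post-patch logic of vmballoon_reserve_page()
shown in the diff below.  hypervisor_lock_page(), reserve_page(), the
refused-page counter, and the numeric values of the error constants are
all illustrative stand-ins rather than the driver's real code; only
VMW_BALLOON_MAX_REFUSED and the error constant names come from the patch.

#include <stdbool.h>
#include <stdio.h>

#define VMW_BALLOON_MAX_REFUSED         16
#define VMW_BALLOON_SUCCESS             0
#define VMW_BALLOON_ERROR_RESET         1       /* illustrative value */
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 2       /* illustrative value */

static unsigned int hypervisor_lock_page(unsigned long pfn)
{
        (void)pfn;              /* stub: always refuse the page */
        return VMW_BALLOON_ERROR_PPN_NOTNEEDED;
}

static int reserve_page(unsigned int *n_refused_pages)
{
        bool locked = false;

        do {
                unsigned int hv_status = hypervisor_lock_page(0);

                locked = (hv_status == VMW_BALLOON_SUCCESS);
                if (!locked) {
                        /*
                         * New behavior: on RESET or PPN_NOTNEEDED give up
                         * immediately instead of retrying with other pages
                         * until the refused list reaches
                         * VMW_BALLOON_MAX_REFUSED.
                         */
                        if (hv_status == VMW_BALLOON_ERROR_RESET ||
                            hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)
                                return -1;      /* stands in for -EIO */

                        (*n_refused_pages)++;
                }
        } while (!locked && *n_refused_pages < VMW_BALLOON_MAX_REFUSED);

        return locked ? 0 : -1;
}

int main(void)
{
        unsigned int n_refused = 0;

        printf("reserve_page() = %d, refused pages accumulated: %u\n",
               reserve_page(&n_refused), n_refused);
        return 0;
}

With the stub refusing every page, the sketch bails out on the first
attempt without accumulating refused pages, which is exactly the behavior
this patch introduces.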

Signed-off-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Dmitry Torokhov <dtor@vmware.com>, 2011-01-12 17:01:07 -08:00; committed by Linus Torvalds
parent 17fecb5582
commit d27a0c06ec
1 changed file with 8 additions and 5 deletions

@@ -45,7 +45,7 @@
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.1-k");
+MODULE_VERSION("1.2.1.2-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
@@ -315,7 +315,8 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
  * fear that guest will need it. Host may reject some pages, we need to
  * check the return value and maybe submit a different page.
  */
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+                                     unsigned int *hv_status)
 {
         unsigned long status, dummy;
         u32 pfn32;
@@ -326,7 +327,7 @@ static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
         STATS_INC(b->stats.lock);
 
-        status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+        *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
         if (vmballoon_check_status(b, status))
                 return true;
@@ -410,6 +411,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
 {
         struct page *page;
         gfp_t flags;
+        unsigned int hv_status;
         bool locked = false;
 
         do {
@@ -429,11 +431,12 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
                 }
 
                 /* inform monitor */
-                locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+                locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
                 if (!locked) {
                         STATS_INC(b->stats.refused_alloc);
 
-                        if (b->reset_required) {
+                        if (hv_status == VMW_BALLOON_ERROR_RESET ||
+                            hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
                                 __free_page(page);
                                 return -EIO;
                         }