about summary refs log tree commit diff stats
path: root/drivers/misc
diff options
context:
space:
mode:
authorNadav Amit <namit@vmware.com>2018-09-20 10:30:13 -0700
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2018-09-25 20:11:42 +0200
commit8fa3c61a79868ad3529f1dc61709a4c46adab467 (patch)
treed9efc09fcf206e22284dc44ceb8173bda0886fe7 /drivers/misc
parentvmw_balloon: change batch/single lock abstractions (diff)
downloadlinux-dev-8fa3c61a79868ad3529f1dc61709a4c46adab467.tar.xz
linux-dev-8fa3c61a79868ad3529f1dc61709a4c46adab467.zip
vmw_balloon: treat all refused pages equally
Currently, when the hypervisor rejects a page during lock operation, the VM treats pages differently according to the error-code: in certain cases the page is immediately freed, and in others it is put on a rejection list and only freed later.

The behavior does not make too much sense. If the page is freed immediately it is very likely to be used again in the next batch of allocations, and be rejected again.

In addition, for support of compaction and OOM notifiers, we wish to separate the logic that communicates with the hypervisor (as well as analyzes the status of each page) from the logic that allocates or free pages.

Treat all errors the same way, queuing the pages on the refuse list. Move to the next allocation size (4k) when too many pages are refused. Free the refused pages when moving to the next size to avoid situations in which too much memory is waiting to be freed on the refused list.

Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com> Signed-off-by: Nadav Amit <namit@vmware.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/misc')
-rw-r--r--drivers/misc/vmw_balloon.c52
1 files changed, 29 insertions, 23 deletions
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 96dde120bbd5..4e067d269706 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -543,29 +543,13 @@ static int vmballoon_lock(struct vmballoon *b, unsigned int num_pages,
/* Error occurred */
STATS_INC(b->stats.refused_alloc[is_2m_pages]);
- switch (status) {
- case VMW_BALLOON_ERROR_PPN_PINNED:
- case VMW_BALLOON_ERROR_PPN_INVALID:
- /*
- * Place page on the list of non-balloonable pages
- * and retry allocation, unless we already accumulated
- * too many of them, in which case take a breather.
- */
- if (page_size->n_refused_pages
- < VMW_BALLOON_MAX_REFUSED) {
- list_add(&p->lru, &page_size->refused_pages);
- page_size->n_refused_pages++;
- break;
- }
- /* Fallthrough */
- case VMW_BALLOON_ERROR_RESET:
- case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
- vmballoon_free_page(p, is_2m_pages);
- break;
- default:
- /* This should never happen */
- WARN_ON_ONCE(true);
- }
+ /*
+ * Place page on the list of non-balloonable pages
+ * and retry allocation, unless we already accumulated
+ * too many of them, in which case take a breather.
+ */
+ list_add(&p->lru, &page_size->refused_pages);
+ page_size->n_refused_pages++;
}
return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
@@ -712,9 +696,31 @@ static void vmballoon_inflate(struct vmballoon *b)
vmballoon_add_page(b, num_pages++, page);
if (num_pages == b->batch_max_pages) {
+ struct vmballoon_page_size *page_size =
+ &b->page_sizes[is_2m_pages];
+
error = vmballoon_lock(b, num_pages, is_2m_pages);
num_pages = 0;
+
+ /*
+ * Stop allocating this page size if we already
+ * accumulated too many pages that the hypervisor
+ * refused.
+ */
+ if (page_size->n_refused_pages >=
+ VMW_BALLOON_MAX_REFUSED) {
+ if (!is_2m_pages)
+ break;
+
+ /*
+ * Release the refused pages as we move to 4k
+ * pages.
+ */
+ vmballoon_release_refused_pages(b, true);
+ is_2m_pages = false;
+ }
+
if (error)
break;
}