Xen-Devel Archive on lore.kernel.org
 help / color / Atom feed
From: Ankur Arora <ankur.a.arora@oracle.com>
To: linux-kernel@vger.kernel.org, xen-devel@lists.xenproject.org
Cc: jgross@suse.com, sstabellini@kernel.org, konrad.wilk@oracle.com,
	ankur.a.arora@oracle.com, pbonzini@redhat.com,
	boris.ostrovsky@oracle.com, joao.m.martins@oracle.com
Subject: [RFC PATCH 10/16] xen/balloon: support ballooning in xenhost_t
Date: Thu,  9 May 2019 10:25:34 -0700
Message-ID: <20190509172540.12398-11-ankur.a.arora__39927.8947430261$1557422919$gmane$org@oracle.com> (raw)
In-Reply-To: <20190509172540.12398-1-ankur.a.arora@oracle.com>

Xen ballooning uses hollow struct pages (with the underlying GFNs being
populated/unpopulated via hypercalls) which are used by the grant logic
to map grants from other domains.

Allow the default xenhost to provide an alternate ballooning
allocation mechanism. This is expected to be useful for local xenhosts
(type xenhost_r0) because unlike Xen, where there is an external
hypervisor which can change the memory underneath a GFN, that is not
possible when the hypervisor is running in the same address space
as the entity doing the ballooning.

Co-developed-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 arch/x86/xen/enlighten_hvm.c       |  7 +++++++
 arch/x86/xen/enlighten_pv.c        |  8 ++++++++
 drivers/xen/balloon.c              | 19 ++++++++++++++++---
 drivers/xen/grant-table.c          |  4 ++--
 drivers/xen/privcmd.c              |  4 ++--
 drivers/xen/xen-selfballoon.c      |  2 ++
 drivers/xen/xenbus/xenbus_client.c |  6 +++---
 drivers/xen/xlate_mmu.c            |  4 ++--
 include/xen/balloon.h              |  4 ++--
 include/xen/xenhost.h              | 19 +++++++++++++++++++
 10 files changed, 63 insertions(+), 14 deletions(-)

diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index efe483ceeb9a..a371bb9ee478 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -130,9 +130,16 @@ xenhost_ops_t xh_hvm_ops = {
 	.setup_shared_info = xen_hvm_init_shared_info,
 	.reset_shared_info = xen_hvm_reset_shared_info,
 	.probe_vcpu_id = xen_hvm_probe_vcpu_id,
+
+	/* We just use the default method of ballooning. */
+	.alloc_ballooned_pages = NULL,
+	.free_ballooned_pages = NULL,
 };
 
 xenhost_ops_t xh_hvm_nested_ops = {
+	/* Nested xenhosts are not allowed to balloon. */
+	.alloc_ballooned_pages = NULL,
+	.free_ballooned_pages = NULL,
 };
 
 static void __init init_hvm_pv_info(void)
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 77b1a0d4aef2..2e94e02cdbb4 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1247,11 +1247,19 @@ xenhost_ops_t xh_pv_ops = {
 	.reset_shared_info = xen_pv_reset_shared_info,
 
 	.probe_vcpu_id = xen_pv_probe_vcpu_id,
+
+	/* We just use the default method of ballooning. */
+	.alloc_ballooned_pages = NULL,
+	.free_ballooned_pages = NULL,
 };
 
 xenhost_ops_t xh_pv_nested_ops = {
 	.cpuid_base = xen_pv_nested_cpuid_base,
 	.setup_hypercall_page = NULL,
+
+	/* Nested xenhosts are not allowed to balloon. */
+	.alloc_ballooned_pages = NULL,
+	.free_ballooned_pages = NULL,
 };
 
 /* First C function to be called on Xen boot */
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5ef4d6ad920d..08becf574743 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -63,6 +63,7 @@
 #include <asm/tlb.h>
 
 #include <xen/interface/xen.h>
+#include <xen/xenhost.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
@@ -583,12 +584,21 @@ static int add_ballooned_pages(int nr_pages)
  * @pages: pages returned
  * @return 0 on success, error otherwise
  */
-int alloc_xenballooned_pages(int nr_pages, struct page **pages)
+int alloc_xenballooned_pages(xenhost_t *xh, int nr_pages, struct page **pages)
 {
 	int pgno = 0;
 	struct page *page;
 	int ret;
 
+	/*
+	 * xenmem transactions for remote xenhost are disallowed.
+	 */
+	if (xh->type == xenhost_r2)
+		return -EINVAL;
+
+	if (xh->ops->alloc_ballooned_pages)
+		return xh->ops->alloc_ballooned_pages(xh, nr_pages, pages);
+
 	mutex_lock(&balloon_mutex);
 
 	balloon_stats.target_unpopulated += nr_pages;
@@ -620,7 +630,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
 	return 0;
  out_undo:
 	mutex_unlock(&balloon_mutex);
-	free_xenballooned_pages(pgno, pages);
+	free_xenballooned_pages(xh, pgno, pages);
 	return ret;
 }
 EXPORT_SYMBOL(alloc_xenballooned_pages);
@@ -630,10 +640,13 @@ EXPORT_SYMBOL(alloc_xenballooned_pages);
  * @nr_pages: Number of pages
  * @pages: pages to return
  */
-void free_xenballooned_pages(int nr_pages, struct page **pages)
+void free_xenballooned_pages(xenhost_t *xh, int nr_pages, struct page **pages)
 {
 	int i;
 
+	if (xh->ops->free_ballooned_pages)
+		return xh->ops->free_ballooned_pages(xh, nr_pages, pages);
+
 	mutex_lock(&balloon_mutex);
 
 	for (i = 0; i < nr_pages; i++) {
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 98af259d0d4f..ec90769907a4 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -804,7 +804,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
 {
 	int ret;
 
-	ret = alloc_xenballooned_pages(nr_pages, pages);
+	ret = alloc_xenballooned_pages(xh_default, nr_pages, pages);
 	if (ret < 0)
 		return ret;
 
@@ -839,7 +839,7 @@ EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
 void gnttab_free_pages(int nr_pages, struct page **pages)
 {
 	gnttab_pages_clear_private(nr_pages, pages);
-	free_xenballooned_pages(nr_pages, pages);
+	free_xenballooned_pages(xh_default, nr_pages, pages);
 }
 EXPORT_SYMBOL_GPL(gnttab_free_pages);
 
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index b5541f862720..88cd99e4f5c1 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -427,7 +427,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
 	if (pages == NULL)
 		return -ENOMEM;
 
-	rc = alloc_xenballooned_pages(numpgs, pages);
+	rc = alloc_xenballooned_pages(xh_default, numpgs, pages);
 	if (rc != 0) {
 		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
 			numpgs, rc);
@@ -928,7 +928,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 
 	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
 	if (rc == 0)
-		free_xenballooned_pages(numpgs, pages);
+		free_xenballooned_pages(xh_default, numpgs, pages);
 	else
 		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
 			numpgs, rc);
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 246f6122c9ee..83a3995a33e3 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -74,6 +74,8 @@
 #include <linux/mman.h>
 #include <linux/workqueue.h>
 #include <linux/device.h>
+#include <xen/interface/xen.h>
+#include <xen/xenhost.h>
 #include <xen/balloon.h>
 #include <xen/tmem.h>
 #include <xen/xen.h>
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index f0cf47765726..5748fbaf0238 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -563,7 +563,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
 	if (!node)
 		return -ENOMEM;
 
-	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
+	err = alloc_xenballooned_pages(xh_default, nr_pages, node->hvm.pages);
 	if (err)
 		goto out_err;
 
@@ -602,7 +602,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
 			 addr, nr_pages);
  out_free_ballooned_pages:
 	if (!leaked)
-		free_xenballooned_pages(nr_pages, node->hvm.pages);
+		free_xenballooned_pages(xh_default, nr_pages, node->hvm.pages);
  out_err:
 	kfree(node);
 	return err;
@@ -849,7 +849,7 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 			       info.addrs);
 	if (!rv) {
 		vunmap(vaddr);
-		free_xenballooned_pages(nr_pages, node->hvm.pages);
+		free_xenballooned_pages(xh_default, nr_pages, node->hvm.pages);
 	}
 	else
 		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index e7df65d32c91..f25a80a4076b 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -233,7 +233,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 		kfree(pages);
 		return -ENOMEM;
 	}
-	rc = alloc_xenballooned_pages(nr_pages, pages);
+	rc = alloc_xenballooned_pages(xh_default, nr_pages, pages);
 	if (rc) {
 		pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
 			nr_pages, rc);
@@ -250,7 +250,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 	if (!vaddr) {
 		pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
 			nr_pages, rc);
-		free_xenballooned_pages(nr_pages, pages);
+		free_xenballooned_pages(xh_default, nr_pages, pages);
 		kfree(pages);
 		kfree(pfns);
 		return -ENOMEM;
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 4914b93a23f2..e8fb5a5ef490 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -24,8 +24,8 @@ extern struct balloon_stats balloon_stats;
 
 void balloon_set_new_target(unsigned long target);
 
-int alloc_xenballooned_pages(int nr_pages, struct page **pages);
-void free_xenballooned_pages(int nr_pages, struct page **pages);
+int alloc_xenballooned_pages(xenhost_t *xh, int nr_pages, struct page **pages);
+void free_xenballooned_pages(xenhost_t *xh, int nr_pages, struct page **pages);
 
 struct device;
 #ifdef CONFIG_XEN_SELFBALLOONING
diff --git a/include/xen/xenhost.h b/include/xen/xenhost.h
index c9dabf739ff8..9e08627a9e3e 100644
--- a/include/xen/xenhost.h
+++ b/include/xen/xenhost.h
@@ -198,6 +198,25 @@ typedef struct xenhost_ops {
 	 * get accessed via pv_ops.irq.* and the evtchn logic.
 	 */
 	void (*probe_vcpu_id)(xenhost_t *xenhost, int cpu);
+
+	/*
+	 * We only want to do ballooning with the default xenhost -- two
+	 * hypervisors managing a guest's memory is unlikely to lead anywhere
+	 * good and xenballooned frames obtained from the default xenhost can
+	 * be just as well populated by the remote xenhost (which is what we
+	 * will need it for.)
+	 *
+	 * xenhost_r1: unchanged from before.
+	 * xenhost_r2: disallowed.
+	 * xenhost_r0: for a local xenhost, unlike Xen, there's no external entity
+	 *  which can remap pages, so the balloon allocation here just returns page-0.
+	 *  When the allocated page is used (in GNTTABOP_map_grant_ref), we fix this
+	 *  up by returning the correct page.
+	 */
+
+	int (*alloc_ballooned_pages)(xenhost_t *xh, int nr_pages, struct page **pages);
+	void (*free_ballooned_pages)(xenhost_t *xh, int nr_pages, struct page **pages);
+
 } xenhost_ops_t;
 
 extern xenhost_t *xh_default, *xh_remote;
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

  parent reply index

Thread overview: 66+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-05-09 17:25 [Xen-devel] [RFC PATCH 00/16] xenhost support Ankur Arora
2019-05-09 17:25 ` [Xen-devel] [RFC PATCH 01/16] x86/xen: add xenhost_t interface Ankur Arora
2019-06-07 15:04   ` Juergen Gross
2019-06-11  7:16     ` Ankur Arora
2019-06-14 11:52       ` Juergen Gross
2019-05-09 17:25 ` Ankur Arora
2019-05-09 17:25 ` [RFC PATCH 02/16] x86/xen: cpuid support in xenhost_t Ankur Arora
2019-05-09 17:25   ` [Xen-devel] " Ankur Arora
2019-06-12 21:09   ` Andrew Cooper
2019-05-09 17:25 ` [Xen-devel] [RFC PATCH 03/16] x86/xen: make hypercall_page generic Ankur Arora
2019-05-09 17:25 ` Ankur Arora
2019-05-09 17:25 ` [RFC PATCH 04/16] x86/xen: hypercall support for xenhost_t Ankur Arora
2019-05-09 17:25 ` [Xen-devel] " Ankur Arora
2019-06-12 21:15   ` Andrew Cooper
2019-06-14  7:20     ` Ankur Arora
2019-06-14  7:35       ` Juergen Gross
2019-06-14  8:00         ` Andrew Cooper
2019-05-09 17:25 ` [RFC PATCH 05/16] x86/xen: add feature support in xenhost_t Ankur Arora
2019-05-09 17:25 ` [Xen-devel] " Ankur Arora
2019-05-09 17:25 ` [Xen-devel] [RFC PATCH 06/16] x86/xen: add shared_info support to xenhost_t Ankur Arora
2019-06-07 15:08   ` Juergen Gross
2019-06-08  5:01     ` Ankur Arora
2019-05-09 17:25 ` Ankur Arora
2019-05-09 17:25 ` [RFC PATCH 07/16] x86/xen: make vcpu_info part of xenhost_t Ankur Arora
2019-05-09 17:25   ` [Xen-devel] " Ankur Arora
2019-06-14 11:53   ` Juergen Gross
2019-06-17  6:28     ` Ankur Arora
2019-05-09 17:25 ` [RFC PATCH 08/16] x86/xen: irq/upcall handling with multiple xenhosts Ankur Arora
2019-05-09 17:25 ` [Xen-devel] " Ankur Arora
2019-06-14 12:01   ` Juergen Gross
2019-05-09 17:25 ` [RFC PATCH 09/16] xen/evtchn: support evtchn in xenhost_t Ankur Arora
2019-05-09 17:25 ` [Xen-devel] " Ankur Arora
2019-06-14 12:04   ` Juergen Gross
2019-06-17  6:09     ` Ankur Arora
2019-05-09 17:25 ` Ankur Arora [this message]
2019-05-09 17:25 ` [Xen-devel] [RFC PATCH 10/16] xen/balloon: support ballooning " Ankur Arora
2019-06-17  9:28   ` Juergen Gross
2019-06-19  2:24     ` Ankur Arora
2019-05-09 17:25 ` [RFC PATCH 11/16] xen/grant-table: make grant-table xenhost aware Ankur Arora
2019-05-09 17:25   ` [Xen-devel] " Ankur Arora
2019-06-17  9:36   ` Juergen Gross
2019-06-19  2:25     ` Ankur Arora
2019-05-09 17:25 ` [RFC PATCH 12/16] xen/xenbus: support xenbus frontend/backend with xenhost_t Ankur Arora
2019-05-09 17:25 ` [Xen-devel] " Ankur Arora
2019-06-17  9:50   ` Juergen Gross
2019-06-19  2:38     ` Ankur Arora
2019-05-09 17:25 ` [Xen-devel] [RFC PATCH 13/16] drivers/xen: gnttab, evtchn, xenbus API changes Ankur Arora
2019-06-17 10:07   ` Juergen Gross
2019-06-19  2:55     ` Ankur Arora
2019-05-09 17:25 ` Ankur Arora
2019-05-09 17:25 ` [Xen-devel] [RFC PATCH 14/16] xen/blk: " Ankur Arora
2019-06-17 10:14   ` Juergen Gross
2019-06-19  2:59     ` Ankur Arora
2019-05-09 17:25 ` Ankur Arora
2019-05-09 17:25 ` [RFC PATCH 15/16] xen/net: " Ankur Arora
2019-05-09 17:25 ` [Xen-devel] " Ankur Arora
2019-06-17 10:14   ` Juergen Gross
2019-05-09 17:25 ` [Xen-devel] [RFC PATCH 16/16] xen/grant-table: host_addr fixup in mapping on xenhost_r0 Ankur Arora
2019-06-17 10:55   ` Juergen Gross
2019-06-19  3:02     ` Ankur Arora
2019-05-09 17:25 ` Ankur Arora
2019-06-07 14:51 ` [Xen-devel] [RFC PATCH 00/16] xenhost support Juergen Gross
2019-06-07 15:22   ` Joao Martins
2019-06-07 16:21     ` Juergen Gross
2019-06-08  5:50       ` Ankur Arora
2019-06-08  5:33   ` Ankur Arora

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='20190509172540.12398-11-ankur.a.arora__39927.8947430261$1557422919$gmane$org@oracle.com' \
    --to=ankur.a.arora@oracle.com \
    --cc=boris.ostrovsky@oracle.com \
    --cc=jgross@suse.com \
    --cc=joao.m.martins@oracle.com \
    --cc=konrad.wilk@oracle.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=sstabellini@kernel.org \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Xen-Devel Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/xen-devel/0 xen-devel/git/0.git
	git clone --mirror https://lore.kernel.org/xen-devel/1 xen-devel/git/1.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 xen-devel xen-devel/ https://lore.kernel.org/xen-devel \
		xen-devel@lists.xenproject.org xen-devel@archiver.kernel.org
	public-inbox-index xen-devel


Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.xenproject.lists.xen-devel


AGPL code for this site: git clone https://public-inbox.org/ public-inbox