All of lore.kernel.org
 help / color / mirror / Atom feed
From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: wei.liu2@citrix.com, andrew.cooper3@citrix.com,
	ian.jackson@eu.citrix.com, jbeulich@suse.com,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	roger.pau@citrix.com
Subject: [PATCH v6 09/12] tools: Call XEN_DOMCTL_acpi_access on PVH VCPU hotplug
Date: Tue,  3 Jan 2017 09:04:13 -0500	[thread overview]
Message-ID: <1483452256-2879-10-git-send-email-boris.ostrovsky@oracle.com> (raw)
In-Reply-To: <1483452256-2879-1-git-send-email-boris.ostrovsky@oracle.com>

Provide libxc interface for accessing ACPI via XEN_DOMCTL_acpi_access.

When a VCPU is hot-(un)plugged to/from a PVH guest, update the VCPU map
by writing to ACPI's XEN_ACPI_CPU_MAP register and then set the GPE0
status bit in GPE0.status. (Note: per the v6 changelog below, the GPE0
status update is no longer performed by this patch.)

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v6:
* Fix xc_acpi_access() by updating the val pointer passed to the hypercall
  and take some domctl initializers out of the loop
* Don't update GPE0 status on VCPU map update as it is no longer necessary


 tools/libxc/include/xenctrl.h | 20 ++++++++++++++++++++
 tools/libxc/xc_domain.c       | 41 +++++++++++++++++++++++++++++++++++++++++
 tools/libxl/libxl.c           |  4 ++++
 tools/libxl/libxl_arch.h      |  4 ++++
 tools/libxl/libxl_arm.c       |  6 ++++++
 tools/libxl/libxl_dom.c       | 10 ++++++++++
 tools/libxl/libxl_x86.c       | 11 +++++++++++
 7 files changed, 96 insertions(+)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 4ab0f57..3d771bc 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2710,6 +2710,26 @@ int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout);
 int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout);
 int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout);
 
+/*
+ * Access a guest's ACPI registers via the XEN_DOMCTL_acpi_access domctl.
+ * @rw is XEN_DOMCTL_ACPI_READ or XEN_DOMCTL_ACPI_WRITE, @space_id selects
+ * the ACPI address space (e.g. XEN_ACPI_SYSTEM_IO), and @val points to a
+ * caller-supplied buffer of at least @bytes bytes.  Returns 0 on success,
+ * non-zero otherwise (the implementation propagates do_domctl()'s result).
+ */
+int xc_acpi_access(xc_interface *xch, domid_t domid,
+                   uint8_t rw, uint8_t space_id, unsigned long addr,
+                   unsigned int bytes, void *val);
+
+/* Convenience wrapper: read @bytes from @port in the ACPI SYSTEM_IO space. */
+static inline int xc_acpi_ioread(xc_interface *xch, domid_t domid,
+                                 unsigned long port,
+                                 unsigned int bytes, void *val)
+{
+    return xc_acpi_access(xch, domid, XEN_DOMCTL_ACPI_READ, XEN_ACPI_SYSTEM_IO,
+                          port, bytes, val);
+}
+
+/* Convenience wrapper: write @bytes to @port in the ACPI SYSTEM_IO space. */
+static inline int xc_acpi_iowrite(xc_interface *xch, domid_t domid,
+                                  unsigned long port,
+                                  unsigned int bytes, void *val)
+{
+    return xc_acpi_access(xch, domid, XEN_DOMCTL_ACPI_WRITE, XEN_ACPI_SYSTEM_IO,
+                          port, bytes, val);
+}
+
+
 /* Compat shims */
 #include "xenctrl_compat.h"
 
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 296b852..ed1dddb 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2520,6 +2520,47 @@ int xc_domain_soft_reset(xc_interface *xch,
     domctl.domain = (domid_t)domid;
     return do_domctl(xch, &domctl);
 }
+
+/*
+ * Perform an ACPI register access for @domid via XEN_DOMCTL_acpi_access.
+ * Large requests are split into chunks no bigger than what the domctl's
+ * 'width' field can encode; the guest handle is re-offset for each chunk.
+ */
+int
+xc_acpi_access(xc_interface *xch, domid_t domid,
+               uint8_t rw, uint8_t space_id,
+               unsigned long address, unsigned int bytes, void *val)
+{
+    DECLARE_DOMCTL;
+    /* Bounce in both directions: reads fill @val, writes consume it. */
+    DECLARE_HYPERCALL_BOUNCE(val, bytes, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    struct xen_domctl_acpi_access *access = &domctl.u.acpi_access;
+    /*
+     * Largest chunk representable in access->width.  NOTE(review): the
+     * shift is undefined behaviour if sizeof(access->width) >=
+     * sizeof(unsigned int); this assumes 'width' is a narrow (e.g.
+     * 8-bit) field -- confirm against xen_domctl_acpi_access.
+     */
+    unsigned int max_bytes = (1U << (sizeof(access->width) * 8)) - 1;
+    int ret;
+
+    /* Invariant per-chunk domctl fields are set up once, outside the loop. */
+    memset(&domctl, 0, sizeof(domctl));
+    domctl.domain = domid;
+    domctl.cmd = XEN_DOMCTL_acpi_access;
+    access->space_id = space_id;
+    access->rw = rw;
+    access->address = address;
+
+    if ( (ret = xc_hypercall_bounce_pre(xch, val)) )
+        return ret;
+
+    while ( bytes != 0 )
+    {
+        /* Clamp this chunk to what the width field can hold. */
+        access->width = bytes < max_bytes ? bytes : max_bytes;
+        /*
+         * Point the guest handle at the not-yet-transferred part of the
+         * buffer: (access->address - address) bytes are already done.
+         */
+        set_xen_guest_handle_offset(domctl.u.acpi_access.val,
+                                    val, access->address - address);
+
+        if ( (ret = do_domctl(xch, &domctl)) )
+             goto out;
+
+        bytes -= access->width;
+        access->address += access->width;
+    }
+
+ out:
+    /* Unmap/copy back the bounce buffer on both success and error paths. */
+    xc_hypercall_bounce_post(xch, val);
+
+    return ret;
+}
+
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index bbbb3de..d8306ff 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -5147,7 +5147,11 @@ int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
     case LIBXL_DOMAIN_TYPE_HVM:
         switch (libxl__device_model_version_running(gc, domid)) {
         case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN_TRADITIONAL:
+            break;
         case LIBXL_DEVICE_MODEL_VERSION_NONE:
+            rc = libxl__arch_set_vcpuonline(gc, domid, cpumap);
+            if (rc < 0)
+                LOGE(ERROR, "Can't change vcpu online map (%d)", rc);
             break;
         case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN:
             rc = libxl__set_vcpuonline_qmp(gc, domid, cpumap, &info);
diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
index 5e1fc60..9649c21 100644
--- a/tools/libxl/libxl_arch.h
+++ b/tools/libxl/libxl_arch.h
@@ -71,6 +71,10 @@ int libxl__arch_extra_memory(libxl__gc *gc,
                              const libxl_domain_build_info *info,
                              uint64_t *out);
 
+_hidden
+int libxl__arch_set_vcpuonline(libxl__gc *gc, uint32_t domid,
+                               libxl_bitmap *cpumap);
+
 #if defined(__i386__) || defined(__x86_64__)
 
 #define LAPIC_BASE_ADDRESS  0xfee00000
diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
index d842d88..93dc81e 100644
--- a/tools/libxl/libxl_arm.c
+++ b/tools/libxl/libxl_arm.c
@@ -126,6 +126,12 @@ out:
     return rc;
 }
 
+/*
+ * ARM stub: setting the VCPU online map via ACPI is not implemented on
+ * ARM, so this unconditionally fails with ERROR_FAIL.
+ */
+int libxl__arch_set_vcpuonline(libxl__gc *gc, uint32_t domid,
+                               libxl_bitmap *cpumap)
+{
+    return ERROR_FAIL;
+}
+
+
 static struct arch_info {
     const char *guest_type;
     const char *timer_compat;
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index d519c8d..ca8f7a2 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -309,6 +309,16 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
         return ERROR_FAIL;
     }
 
+    if ((info->type == LIBXL_DOMAIN_TYPE_HVM) &&
+        (libxl__device_model_version_running(gc, domid) ==
+         LIBXL_DEVICE_MODEL_VERSION_NONE)) {
+        rc = libxl__arch_set_vcpuonline(gc, domid, &info->avail_vcpus);
+        if (rc) {
+            LOG(ERROR, "Couldn't set available vcpu count (error %d)", rc);
+            return ERROR_FAIL;
+        }
+    }
+
     /*
      * Check if the domain has any CPU or node affinity already. If not, try
      * to build up the latter via automatic NUMA placement. In fact, in case
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
index 5da7504..00c3891 100644
--- a/tools/libxl/libxl_x86.c
+++ b/tools/libxl/libxl_x86.c
@@ -3,6 +3,9 @@
 
 #include <xc_dom.h>
 
+#include <xen/arch-x86/xen.h>
+#include <xen/hvm/ioreq.h>
+
 int libxl__arch_domain_prepare_config(libxl__gc *gc,
                                       libxl_domain_config *d_config,
                                       xc_domain_configuration_t *xc_config)
@@ -368,6 +371,14 @@ int libxl__arch_extra_memory(libxl__gc *gc,
     return 0;
 }
 
+/*
+ * x86: push the guest's available-VCPU bitmap into the emulated ACPI
+ * XEN_ACPI_CPU_MAP I/O register.  Assumes cpumap->size is the map length
+ * in bytes -- TODO confirm against the libxl_bitmap definition.
+ */
+int libxl__arch_set_vcpuonline(libxl__gc *gc, uint32_t domid,
+			       libxl_bitmap *cpumap)
+{
+    /* Update VCPU map. */
+    return xc_acpi_iowrite(CTX->xch, domid, XEN_ACPI_CPU_MAP,
+                           cpumap->size, cpumap->map);
+}
+
+
 int libxl__arch_domain_init_hw_description(libxl__gc *gc,
                                            libxl_domain_build_info *info,
                                            libxl__domain_build_state *state,
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

  parent reply	other threads:[~2017-01-03 14:04 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-01-03 14:04 [PATCH v6 00/12] PVH VCPU hotplug support Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 01/12] domctl: Add XEN_DOMCTL_acpi_access Boris Ostrovsky
2017-01-03 18:21   ` Daniel De Graaf
2017-01-03 20:51   ` Konrad Rzeszutek Wilk
2017-01-03 14:04 ` [PATCH v6 02/12] x86/save: public/arch-x86/hvm/save.h is available to hypervisor and tools only Boris Ostrovsky
2017-01-03 16:55   ` Jan Beulich
2017-01-03 14:04 ` [PATCH v6 03/12] pvh/acpi: Install handlers for ACPI-related PVH IO accesses Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 04/12] pvh/acpi: Handle ACPI accesses for PVH guests Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 05/12] x86/domctl: Handle ACPI access from domctl Boris Ostrovsky
2017-07-31 14:14   ` Ross Lagerwall
2017-07-31 14:59     ` Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 06/12] events/x86: Define SCI virtual interrupt Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 07/12] pvh: Send an SCI on VCPU hotplug event Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 08/12] libxl: Update xenstore on VCPU hotplug for all guest types Boris Ostrovsky
2017-01-04 10:36   ` Wei Liu
2017-01-03 14:04 ` Boris Ostrovsky [this message]
2017-01-03 14:04 ` [PATCH v6 10/12] pvh: Set online VCPU map to avail_vcpus Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 11/12] pvh/acpi: Save ACPI registers for PVH guests Boris Ostrovsky
2017-01-03 14:04 ` [PATCH v6 12/12] docs: Describe PVHv2's VCPU hotplug procedure Boris Ostrovsky
2017-01-03 16:58   ` Jan Beulich
2017-01-03 19:33     ` Boris Ostrovsky
2017-01-04  9:26       ` Jan Beulich
2017-01-03 18:19   ` Stefano Stabellini
2017-01-03 20:31     ` Boris Ostrovsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1483452256-2879-10-git-send-email-boris.ostrovsky@oracle.com \
    --to=boris.ostrovsky@oracle.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=ian.jackson@eu.citrix.com \
    --cc=jbeulich@suse.com \
    --cc=roger.pau@citrix.com \
    --cc=wei.liu2@citrix.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.