* [Patch 4/4] Refining Xsave/Xrestore support
@ 2010-10-27  7:05 Haitao Shan
  2010-10-27 10:25 ` Tim Deegan
  2010-10-27 10:39 ` Jan Beulich
  0 siblings, 2 replies; 11+ messages in thread
From: Haitao Shan @ 2010-10-27  7:05 UTC (permalink / raw)
  To: xen-devel, Keir Fraser; +Cc: Han, Weidong

[-- Attachment #1: Type: text/plain, Size: 115 bytes --]

Hi, Keir,

This is patch #4, which adds domain save/restore support when
XSAVE/XRSTOR is supported.

Shan Haitao

[-- Attachment #2: xsave-migration.patch --]
[-- Type: application/octet-stream, Size: 27077 bytes --]

Domain save/restore support when the processor supports XSAVE/XRSTOR,
covering both PV and HVM guests.
1. Add a pair of new hypercalls for saving/restoring PV guest state.
2. Add a new data chunk for saving/restoring HVM guest state.
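
For reference, the new "xcnt" chunk follows the existing extended-info
chunk convention (a 4-byte signature followed by a 4-byte length and the
payload); its 8-byte payload is the per-vcpu extended state size queried
from the hypervisor. Layout sketch (illustrative only, matching what the
xc_domain_save.c hunk below writes):

/*
 *  char     sig[4];     -- "xcnt"
 *  uint32_t chunk_sz;   -- 8
 *  uint64_t xstate_sz;  -- per-vcpu extended state size
 *                          (domctl.u.vcpuextstate.size)
 */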

Signed-off-by:	Shan Haitao <haitao.shan@intel.com>
		Han Weidong <weidong.han@intel.com>

diff -r 9bf6b4030d70 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Wed Oct 27 21:55:45 2010 +0800
+++ b/tools/libxc/xc_domain_restore.c	Wed Oct 27 22:17:24 2010 +0800
@@ -189,7 +189,8 @@ static int uncanonicalize_pagetable(
 /* Load the p2m frame list, plus potential extended info chunk */
 static xen_pfn_t *load_p2m_frame_list(
     xc_interface *xch, struct restore_ctx *ctx,
-    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
+    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext,
+    int *vcpuextstate, uint64_t *vcpuextstate_size)
 {
     xen_pfn_t *p2m_frame_list;
     vcpu_guest_context_any_t ctxt;
@@ -266,6 +267,13 @@ static xen_pfn_t *load_p2m_frame_list(
             {
                 *ext_vcpucontext = 1;
             }
+            else if ( !strncmp(chunk_sig, "xcnt", 4) )
+            {
+                *vcpuextstate = 1;
+                RDEXACT(io_fd, vcpuextstate_size, sizeof(*vcpuextstate_size));
+                tot_bytes -= chunk_bytes;
+                chunk_bytes = 0;
+            }
             
             /* Any remaining bytes of this chunk: read and discard. */
             while ( chunk_bytes )
@@ -449,7 +457,8 @@ static int dump_qemu(xc_interface *xch, 
 static int buffer_tail_hvm(xc_interface *xch, struct restore_ctx *ctx,
                            struct tailbuf_hvm *buf, int fd,
                            unsigned int max_vcpu_id, uint64_t vcpumap,
-                           int ext_vcpucontext)
+                           int ext_vcpucontext,
+                           int vcpuextstate, uint64_t vcpuextstate_size)
 {
     uint8_t *tmp;
     unsigned char qemusig[21];
@@ -511,7 +520,9 @@ static int buffer_tail_hvm(xc_interface 
 static int buffer_tail_pv(xc_interface *xch, struct restore_ctx *ctx,
                           struct tailbuf_pv *buf, int fd,
                           unsigned int max_vcpu_id, uint64_t vcpumap,
-                          int ext_vcpucontext)
+                          int ext_vcpucontext,
+                          int vcpuextstate,
+                          uint64_t vcpuextstate_size)
 {
     unsigned int i;
     size_t pfnlen, vcpulen;
@@ -551,6 +562,9 @@ static int buffer_tail_pv(xc_interface *
                : sizeof(vcpu_guest_context_x86_32_t)) * buf->vcpucount;
     if ( ext_vcpucontext )
         vcpulen += 128 * buf->vcpucount;
+    if ( vcpuextstate ) {
+        vcpulen += vcpuextstate_size * buf->vcpucount;
+    }
 
     if ( !(buf->vcpubuf) ) {
         if ( !(buf->vcpubuf = malloc(vcpulen)) ) {
@@ -589,14 +603,17 @@ static int buffer_tail_pv(xc_interface *
 
 static int buffer_tail(xc_interface *xch, struct restore_ctx *ctx,
                        tailbuf_t *buf, int fd, unsigned int max_vcpu_id,
-                       uint64_t vcpumap, int ext_vcpucontext)
+                       uint64_t vcpumap, int ext_vcpucontext,
+                       int vcpuextstate, uint64_t vcpuextstate_size)
 {
     if ( buf->ishvm )
         return buffer_tail_hvm(xch, ctx, &buf->u.hvm, fd, max_vcpu_id, vcpumap,
-                               ext_vcpucontext);
+                               ext_vcpucontext, vcpuextstate,
+                               vcpuextstate_size);
     else
         return buffer_tail_pv(xch, ctx, &buf->u.pv, fd, max_vcpu_id, vcpumap,
-                              ext_vcpucontext);
+                              ext_vcpucontext, vcpuextstate,
+                              vcpuextstate_size);
 }
 
 static void tailbuf_free_hvm(struct tailbuf_hvm *buf)
@@ -1051,6 +1068,8 @@ int xc_domain_restore(xc_interface *xch,
 {
     DECLARE_DOMCTL;
     int rc = 1, frc, i, j, n, m, pae_extended_cr3 = 0, ext_vcpucontext = 0;
+    int vcpuextstate = 0;
+    uint64_t vcpuextstate_size = 0;
     unsigned long mfn, pfn;
     unsigned int prev_pc;
     int nraces = 0;
@@ -1065,6 +1084,9 @@ int xc_domain_restore(xc_interface *xch,
     /* A copy of the CPU context of the guest. */
     DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
 
+    /* A copy of the CPU eXtended States of the guest. */
+    DECLARE_HYPERCALL_BUFFER(void, buffer);
+
     /* A table containing the type of each PFN (/not/ MFN!). */
     unsigned long *pfn_type = NULL;
 
@@ -1151,7 +1173,9 @@ int xc_domain_restore(xc_interface *xch,
     {
         /* Load the p2m frame list, plus potential extended info chunk */
         p2m_frame_list = load_p2m_frame_list(xch, ctx,
-            io_fd, &pae_extended_cr3, &ext_vcpucontext);
+            io_fd, &pae_extended_cr3, &ext_vcpucontext,
+            &vcpuextstate, &vcpuextstate_size);
+
         if ( !p2m_frame_list )
             goto out;
 
@@ -1298,10 +1322,11 @@ int xc_domain_restore(xc_interface *xch,
     if ( !ctx->completed ) {
 
         if ( buffer_tail(xch, ctx, &tailbuf, io_fd, max_vcpu_id, vcpumap,
-                         ext_vcpucontext) < 0 ) {
+                         ext_vcpucontext, vcpuextstate, vcpuextstate_size) < 0 ) {
             ERROR ("error buffering image tail");
             goto out;
         }
+
         ctx->completed = 1;
 
         /*
@@ -1327,7 +1352,7 @@ int xc_domain_restore(xc_interface *xch,
     memset(&tmptail, 0, sizeof(tmptail));
     tmptail.ishvm = hvm;
     if ( buffer_tail(xch, ctx, &tmptail, io_fd, max_vcpu_id, vcpumap,
-                     ext_vcpucontext) < 0 ) {
+                     ext_vcpucontext, vcpuextstate, vcpuextstate_size) < 0 ) {
         ERROR ("error buffering image tail, finishing");
         goto finish;
     }
@@ -1648,7 +1673,7 @@ int xc_domain_restore(xc_interface *xch,
         }
 
         if ( !ext_vcpucontext )
-            continue;
+            goto vcpu_ext_state_restore;
         memcpy(&domctl.u.ext_vcpucontext, vcpup, 128);
         vcpup += 128;
         domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
@@ -1659,6 +1684,39 @@ int xc_domain_restore(xc_interface *xch,
             PERROR("Couldn't set extended vcpu%d info", i);
             goto out;
         }
+
+ vcpu_ext_state_restore:
+        if ( !vcpuextstate )
+            continue;
+
+        memcpy(&domctl.u.vcpuextstate.xfeature_mask, vcpup,
+               sizeof(domctl.u.vcpuextstate.xfeature_mask));
+        vcpup += sizeof(domctl.u.vcpuextstate.xfeature_mask);
+        memcpy(&domctl.u.vcpuextstate.size, vcpup,
+               sizeof(domctl.u.vcpuextstate.size));
+        vcpup += sizeof(domctl.u.vcpuextstate.size);
+
+        buffer = xc_hypercall_buffer_alloc(xch, buffer,
+                                           domctl.u.vcpuextstate.size);
+        if ( !buffer )
+        {
+            PERROR("Could not allocate buffer to restore eXtended States");
+            goto out;
+        }
+        memcpy(buffer, vcpup, domctl.u.vcpuextstate.size);
+        vcpup += domctl.u.vcpuextstate.size;
+
+        domctl.cmd = XEN_DOMCTL_setvcpuextstate;
+        domctl.domain = dom;
+        domctl.u.vcpuextstate.vcpu = i;
+        set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
+        frc = xc_domctl(xch, &domctl);
+        if ( frc != 0 )
+        {
+            PERROR("Couldn't set eXtended States for vcpu%d", i);
+            goto out;
+        }
+        xc_hypercall_buffer_free(xch, buffer);
     }
 
     memcpy(shared_info_page, tailbuf.u.pv.shared_info_page, PAGE_SIZE);
diff -r 9bf6b4030d70 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c	Wed Oct 27 21:55:45 2010 +0800
+++ b/tools/libxc/xc_domain_save.c	Wed Oct 27 22:17:24 2010 +0800
@@ -810,14 +810,34 @@ static xen_pfn_t *map_and_save_p2m_table
                               ? sizeof(ctxt.x64) 
                               : sizeof(ctxt.x32));
         uint32_t chunk2_sz = 0;
-        uint32_t tot_sz    = (chunk1_sz + 8) + (chunk2_sz + 8);
+        uint32_t chunk3_sz = 8;
+        uint32_t tot_sz;
+        DECLARE_DOMCTL;
+
+        domctl.cmd = XEN_DOMCTL_getvcpuextstate;
+        domctl.domain = dom;
+        domctl.u.vcpuextstate.vcpu = 0;
+        domctl.u.vcpuextstate.size = 0;
+        domctl.u.vcpuextstate.xfeature_mask = 0;
+        if ( xc_domctl(xch, &domctl) < 0 )
+        {
+            PERROR("No extended context for VCPU%d", i);
+            goto out;
+        }
+
+        tot_sz = (chunk1_sz + 8) + (chunk2_sz + 8) + (chunk3_sz + 8);
+
         if ( write_exact(io_fd, &signature, sizeof(signature)) ||
              write_exact(io_fd, &tot_sz, sizeof(tot_sz)) ||
              write_exact(io_fd, "vcpu", 4) ||
              write_exact(io_fd, &chunk1_sz, sizeof(chunk1_sz)) ||
              write_exact(io_fd, &ctxt, chunk1_sz) ||
              write_exact(io_fd, "extv", 4) ||
-             write_exact(io_fd, &chunk2_sz, sizeof(chunk2_sz)) )
+             write_exact(io_fd, &chunk2_sz, sizeof(chunk2_sz)) ||
+             write_exact(io_fd, "xcnt", 4) ||
+             write_exact(io_fd, &chunk3_sz, sizeof(chunk3_sz)) ||
+             write_exact(io_fd, &domctl.u.vcpuextstate.size,
+                         sizeof(domctl.u.vcpuextstate.size)) )
         {
             PERROR("write: extended info");
             goto out;
@@ -905,6 +925,9 @@ int xc_domain_save(xc_interface *xch, in
     /* base of the region in which domain memory is mapped */
     unsigned char *region_base = NULL;
 
+    /* A copy of the CPU eXtended States of the guest. */
+    DECLARE_HYPERCALL_BUFFER(void, buffer);
+
     /* bitmap of pages:
        - that should be sent this iteration (unless later marked as skip);
        - to skip this iteration because already dirty;
@@ -1786,6 +1809,53 @@ int xc_domain_save(xc_interface *xch, in
             PERROR("Error when writing to state file (2)");
             goto out;
         }
+
+        /* Start to fetch CPU eXtended States */
+        /* Get buffer size first */
+        domctl.cmd = XEN_DOMCTL_getvcpuextstate;
+        domctl.domain = dom;
+        domctl.u.vcpuextstate.vcpu = i;
+        domctl.u.vcpuextstate.xfeature_mask = 0;
+        domctl.u.vcpuextstate.size = 0;
+        if ( xc_domctl(xch, &domctl) < 0 )
+        {
+            PERROR("No eXtended states (XSAVE) for VCPU%d", i);
+            goto out;
+        }
+
+        /* Getting eXtended states data */
+        buffer = xc_hypercall_buffer_alloc(xch, buffer, domctl.u.vcpuextstate.size);
+        if ( !buffer )
+        {
+            PERROR("Insufficient memory for getting eXtended states for"
+                   "VCPU%d", i);
+            goto out;
+        }
+        set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
+        if ( xc_domctl(xch, &domctl) < 0 )
+        {
+            PERROR("No eXtended states (XSAVE) for VCPU%d", i);
+            goto out;
+        }
+
+        if ( wrexact(io_fd, &domctl.u.vcpuextstate.xfeature_mask,
+                     sizeof(domctl.u.vcpuextstate.xfeature_mask)) )
+        {
+            PERROR("Error when writing to state file (2)");
+            goto out;
+        }
+        if ( wrexact(io_fd, &domctl.u.vcpuextstate.size,
+                     sizeof(domctl.u.vcpuextstate.size)) )
+        {
+            PERROR("Error when writing to state file (2)");
+            goto out;
+        }
+        if ( wrexact(io_fd, buffer, domctl.u.vcpuextstate.size) )
+        {
+            PERROR("Error when writing to state file (2)");
+            goto out;
+        }
+        xc_hypercall_buffer_free(xch, buffer);
     }
 
     /*
diff -r 9bf6b4030d70 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Wed Oct 27 21:55:45 2010 +0800
+++ b/xen/arch/x86/domctl.c	Wed Oct 27 22:17:24 2010 +0800
@@ -33,6 +33,7 @@
 #include <asm/mem_event.h>
 #include <public/mem_event.h>
 #include <asm/mem_sharing.h>
+#include <asm/i387.h>
 
 #ifdef XEN_KDB_CONFIG
 #include "../kdb/include/kdbdefs.h"
@@ -1406,6 +1407,124 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_setvcpuextstate:
+    case XEN_DOMCTL_getvcpuextstate:
+    {
+        struct xen_domctl_vcpuextstate *evc;
+        struct domain *d;
+        struct vcpu *v;
+        uint32_t offset = 0;
+        uint64_t _xfeature_mask;
+
+#define PV_XSAVE_SIZE (2 * sizeof(uint64_t) + xsave_cntxt_size)
+
+        evc = &domctl->u.vcpuextstate;
+
+        ret = -ESRCH;
+        d = rcu_lock_domain_by_id(domctl->domain);
+        if ( d == NULL )
+            break;
+
+        ret = xsm_vcpuextstate(d, domctl->cmd);
+        if ( ret )
+            goto vcpuextstate_out;
+
+        ret = -ESRCH;
+        if ( (evc->vcpu >= d->max_vcpus) ||
+             ((v = d->vcpu[evc->vcpu]) == NULL) )
+            goto vcpuextstate_out;
+
+        if ( domctl->cmd == XEN_DOMCTL_getvcpuextstate )
+        {
+            if ( !evc->size && !evc->xfeature_mask )
+            {
+                evc->xfeature_mask = xfeature_mask;
+                evc->size = PV_XSAVE_SIZE;
+                ret = 0;
+                goto vcpuextstate_out;
+            }
+            if ( evc->size != PV_XSAVE_SIZE ||
+                 evc->xfeature_mask != xfeature_mask )
+            {
+                ret = EFAULT;
+                goto vcpuextstate_out;
+            }
+            if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+                                      offset, (void *)&v->arch.xcr0,
+                                      sizeof(v->arch.xcr0)) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+            offset += sizeof(v->arch.xcr0);
+            if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+                                      offset, (void *)&v->arch.xcr0_accum,
+                                      sizeof(v->arch.xcr0_accum)) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+            offset += sizeof(v->arch.xcr0_accum);
+            if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+                                      offset, v->arch.xsave_area,
+                                      xsave_cntxt_size) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+        }
+        else
+        {
+            ret = -EINVAL;
+
+            _xfeature_mask = evc->xfeature_mask;
+            /* xsave context must be restored on compatible target CPUs */
+            if ( (_xfeature_mask & xfeature_mask) != _xfeature_mask )
+                goto vcpuextstate_out;
+            if ( evc->size > PV_XSAVE_SIZE )
+                goto vcpuextstate_out;
+
+            if ( copy_from_guest_offset((void *)&v->arch.xcr0,
+                                        domctl->u.vcpuextstate.buffer, offset,
+                                        sizeof(v->arch.xcr0_accum)) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+            if ( !(v->arch.xcr0 & XSTATE_FP) ||
+                 (v->arch.xcr0 & ~xfeature_mask) )
+            {
+                ret = -EINVAL;
+                goto vcpuextstate_out;
+            }
+            offset += sizeof(v->arch.xcr0);
+            if ( copy_from_guest_offset((void *)&v->arch.xcr0_accum,
+                                        domctl->u.vcpuextstate.buffer, offset,
+                                        sizeof(v->arch.xcr0_accum)) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+            offset += sizeof(v->arch.xcr0_accum);
+            if ( copy_from_guest_offset(v->arch.xsave_area,
+                                        domctl->u.vcpuextstate.buffer, offset,
+                                        xsave_cntxt_size) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+        }
+
+        ret = 0;
+
+    vcpuextstate_out:
+        rcu_unlock_domain(d);
+        if ( (domctl->cmd == XEN_DOMCTL_getvcpuextstate) &&
+             copy_to_guest(u_domctl, domctl, 1) )
+            ret = -EFAULT;
+    }
+    break;
+
 #ifdef __x86_64__
     case XEN_DOMCTL_mem_event_op:
     {
@@ -1455,6 +1574,11 @@ void arch_get_info_guest(struct vcpu *v,
 #define c(fld) (c.nat->fld)
 #endif
 
+    /* Fill legacy context from xsave area first */
+    if ( cpu_has_xsave )
+        memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
+               sizeof(v->arch.guest_context.fpu_ctxt));
+
     if ( !is_pv_32on64_domain(v->domain) )
         memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
 #ifdef CONFIG_COMPAT
diff -r 9bf6b4030d70 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Wed Oct 27 21:55:45 2010 +0800
+++ b/xen/arch/x86/hvm/hvm.c	Wed Oct 27 22:17:24 2010 +0800
@@ -575,8 +575,13 @@ static int hvm_save_cpu_ctxt(struct doma
         vc = &v->arch.guest_context;
 
         if ( v->fpu_initialised )
-            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
-        else 
+            if ( cpu_has_xsave )
+                /* to restore guest img saved on xsave-incapable host */
+                memcpy(v->arch.xsave_area, ctxt.fpu_regs,
+                       sizeof(ctxt.fpu_regs));
+            else
+                memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+        else
             memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
 
         ctxt.rax = vc->user_regs.eax;
@@ -799,6 +804,113 @@ static int hvm_load_cpu_ctxt(struct doma
 HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                           1, HVMSR_PER_VCPU);
 
+#define HVM_CPU_XSAVE_SIZE  (3 * sizeof(uint64_t) + xsave_cntxt_size)
+
+static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+    struct hvm_hw_cpu_xsave *ctxt;
+
+    if ( !cpu_has_xsave )
+        return 0;   /* do nothing */
+
+    for_each_vcpu ( d, v )
+    {
+        if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, HVM_CPU_XSAVE_SIZE) )
+            return 1;
+        ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+        h->cur += HVM_CPU_XSAVE_SIZE;
+        memset(ctxt, 0, HVM_CPU_XSAVE_SIZE);
+
+        ctxt->xfeature_mask = xfeature_mask;
+        ctxt->xcr0 = v->arch.xcr0;
+        ctxt->xcr0_accum = v->arch.xcr0_accum;
+        if ( v->fpu_initialised )
+            memcpy(&ctxt->save_area,
+                v->arch.xsave_area, xsave_cntxt_size);
+    }
+
+    return 0;
+}
+
+static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+{
+    int vcpuid;
+    struct vcpu *v;
+    struct hvm_hw_cpu_xsave *ctxt;
+    struct hvm_save_descriptor *desc;
+    uint64_t _xfeature_mask;
+
+    /* Fail: an image saved on an XSAVE-capable host cannot be
+     * restored on an XSAVE-incapable one. */
+    if ( !cpu_has_xsave )
+        return -EINVAL;
+
+    /* Which vcpu is this? */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
+        return -EINVAL;
+    }
+
+    /* Customized checking for entry since our entry is of variable length */
+    desc = (struct hvm_save_descriptor *)&h->data[h->cur];
+    if ( sizeof (*desc) > h->size - h->cur)
+    {
+        gdprintk(XENLOG_WARNING,
+                 "HVM restore: not enough data left to read descriptpr"
+                 "for type %u\n", CPU_XSAVE_CODE);
+        return -1;
+    }
+    if ( desc->length + sizeof (*desc) > h->size - h->cur)
+    {
+        gdprintk(XENLOG_WARNING,
+                 "HVM restore: not enough data left to read %u bytes "
+                 "for type %u\n", desc->length, CPU_XSAVE_CODE);
+        return -1;
+    }
+    if ( CPU_XSAVE_CODE != desc->typecode || (desc->length > HVM_CPU_XSAVE_SIZE) )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "HVM restore mismatch: expected type %u with max length %u, "
+                 "saw type %u length %u\n", CPU_XSAVE_CODE,
+                 (uint32_t)HVM_CPU_XSAVE_SIZE,
+                 desc->typecode, desc->length);
+        return -1;
+    }
+    h->cur += sizeof (*desc);
+    /* Checking finished */
+
+    ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+    h->cur += desc->length;
+
+    _xfeature_mask = ctxt->xfeature_mask;
+    if ( (_xfeature_mask & xfeature_mask) != xfeature_mask )
+        return -EINVAL;
+
+    v->arch.xcr0 = ctxt->xcr0;
+    v->arch.xcr0_accum = ctxt->xcr0_accum;
+    memcpy(v->arch.xsave_area, &ctxt->save_area, xsave_cntxt_size);
+
+    return 0;
+}
+
+/* We need variable length data chunk for xsave area, hence customized
+ * declaration other than HVM_REGISTER_SAVE_RESTORE.
+ */
+static int __hvm_register_CPU_XSAVE_save_and_restore(void)
+{
+    hvm_register_savevm(CPU_XSAVE_CODE,
+                        "CPU_XSAVE",
+                        hvm_save_cpu_xsave_states,
+                        hvm_load_cpu_xsave_states,
+                        HVM_CPU_XSAVE_SIZE + sizeof (struct hvm_save_descriptor),
+                        HVMSR_PER_VCPU);
+    return 0;
+}
+__initcall(__hvm_register_CPU_XSAVE_save_and_restore);
+
 int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
diff -r 9bf6b4030d70 xen/include/public/arch-x86/hvm/save.h
--- a/xen/include/public/arch-x86/hvm/save.h	Wed Oct 27 21:55:45 2010 +0800
+++ b/xen/include/public/arch-x86/hvm/save.h	Wed Oct 27 22:17:24 2010 +0800
@@ -431,9 +431,32 @@ struct hvm_viridian_context {
 
 DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context);
 
+
+/*
+ * The save area of XSAVE/XRSTOR.
+ */
+
+struct hvm_hw_cpu_xsave {
+    uint64_t xfeature_mask;
+    uint64_t xcr0;                 /* Updated by XSETBV */
+    uint64_t xcr0_accum;           /* Updated by XSETBV */
+    struct {
+        struct { char x[512]; } fpu_sse;
+
+        struct {
+            uint64_t xstate_bv;         /* Updated by XRSTOR */
+            uint64_t reserved[7];
+        } xsave_hdr;                    /* The 64-byte header */
+
+        struct { char x[0]; } ymm;    /* YMM */
+    } save_area;
+} __attribute__((packed));
+
+#define CPU_XSAVE_CODE  16
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 15
+#define HVM_SAVE_CODE_MAX 16
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
diff -r 9bf6b4030d70 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h	Wed Oct 27 21:55:45 2010 +0800
+++ b/xen/include/public/domctl.h	Wed Oct 27 22:17:24 2010 +0800
@@ -781,6 +781,31 @@ struct xen_domctl_mem_sharing_op {
 typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
 
+/* XEN_DOMCTL_setvcpuextstate */
+/* XEN_DOMCTL_getvcpuextstate */
+struct xen_domctl_vcpuextstate {
+    /* IN: VCPU that this call applies to. */
+    uint32_t         vcpu;
+    /*
+     * SET: xfeature support mask of struct (IN)
+     * GET: xfeature support mask of struct (IN/OUT)
+     * The xfeature mask identifies the format of the saved state,
+     * so that a compatible CPU can check the format and decide
+     * whether it can restore it.
+     */
+    uint64_t         xfeature_mask;
+    /*
+     * SET: Size of struct (IN)
+     * GET: Size of struct (IN/OUT)
+     */
+    uint64_t         size;
+#if defined(__i386__) || defined(__x86_64__)
+    XEN_GUEST_HANDLE_64(uint64) buffer;
+#endif
+};
+typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -841,6 +866,8 @@ struct xen_domctl {
 #define XEN_DOMCTL_gettscinfo                    59
 #define XEN_DOMCTL_settscinfo                    60
 #define XEN_DOMCTL_getpageframeinfo3             61
+#define XEN_DOMCTL_setvcpuextstate               62
+#define XEN_DOMCTL_getvcpuextstate               63
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -889,6 +916,7 @@ struct xen_domctl {
         struct xen_domctl_debug_op          debug_op;
         struct xen_domctl_mem_event_op      mem_event_op;
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
+        struct xen_domctl_vcpuextstate      vcpuextstate;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
 #endif
diff -r 9bf6b4030d70 xen/include/xsm/xsm.h
--- a/xen/include/xsm/xsm.h	Wed Oct 27 21:55:45 2010 +0800
+++ b/xen/include/xsm/xsm.h	Wed Oct 27 22:17:24 2010 +0800
@@ -149,6 +149,7 @@ struct xsm_operations {
     int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
     int (*pin_mem_cacheattr) (struct domain *d);
     int (*ext_vcpucontext) (struct domain *d, uint32_t cmd);
+    int (*vcpuextstate) (struct domain *d, uint32_t cmd);
 #endif
 };
 
@@ -622,6 +623,10 @@ static inline int xsm_ext_vcpucontext(st
 {
     return xsm_call(ext_vcpucontext(d, cmd));
 }
+static inline int xsm_vcpuextstate(struct domain *d, uint32_t cmd)
+{
+    return xsm_call(vcpuextstate(d, cmd));
+}
 #endif /* CONFIG_X86 */
 
 #endif /* __XSM_H */
diff -r 9bf6b4030d70 xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c	Wed Oct 27 21:55:45 2010 +0800
+++ b/xen/xsm/flask/hooks.c	Wed Oct 27 22:17:24 2010 +0800
@@ -1177,6 +1177,25 @@ static int flask_ext_vcpucontext (struct
 
     return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, perm);
 }
+
+static int flask_vcpuextstate (struct domain *d, uint32_t cmd)
+{
+    u32 perm;
+
+    switch ( cmd )
+    {
+        case XEN_DOMCTL_setvcpuextstate:
+            perm = DOMAIN__SETVCPUEXTSTATE;
+        break;
+        case XEN_DOMCTL_getvcpuextstate:
+            perm = DOMAIN__GETVCPUEXTSTATE;
+        break;
+        default:
+            return -EPERM;
+    }
+
+    return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, perm);
+}
 #endif
 
 static int io_has_perm(struct domain *d, char *name, unsigned long s, 
@@ -1328,6 +1347,7 @@ static struct xsm_operations flask_ops =
     .bind_pt_irq = flask_bind_pt_irq,
     .pin_mem_cacheattr = flask_pin_mem_cacheattr,
     .ext_vcpucontext = flask_ext_vcpucontext,
+    .vcpuextstate = flask_vcpuextstate,
 #endif
 };
 
diff -r 9bf6b4030d70 xen/xsm/flask/include/av_permissions.h
--- a/xen/xsm/flask/include/av_permissions.h	Wed Oct 27 21:55:45 2010 +0800
+++ b/xen/xsm/flask/include/av_permissions.h	Wed Oct 27 22:17:24 2010 +0800
@@ -51,6 +51,8 @@
 #define DOMAIN__TRIGGER                           0x00800000UL
 #define DOMAIN__GETEXTVCPUCONTEXT                 0x01000000UL
 #define DOMAIN__SETEXTVCPUCONTEXT                 0x02000000UL
+#define DOMAIN__GETVCPUEXTSTATE                   0x04000000UL
+#define DOMAIN__SETVCPUEXTSTATE                   0x08000000UL
 
 #define HVM__SETHVMC                              0x00000001UL
 #define HVM__GETHVMC                              0x00000002UL



* Re: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-27  7:05 [Patch 4/4] Refining Xsave/Xrestore support Haitao Shan
@ 2010-10-27 10:25 ` Tim Deegan
       [not found]   ` <AANLkTi=1fW3rQL+8SRAzYv3D6Lqo2PGC7uYzd5VkX8hw@mail.gmail.com>
  2010-10-28  4:57   ` Haitao Shan
  2010-10-27 10:39 ` Jan Beulich
  1 sibling, 2 replies; 11+ messages in thread
From: Tim Deegan @ 2010-10-27 10:25 UTC (permalink / raw)
  To: Haitao Shan; +Cc: Keir, xen-devel, Han, Weidong, Fraser

Hi,

Thanks for this - good to see XSAVE save/restore working.  I've no
comments on the tools part of this patch; it looks plausible but I
haven't reviewed it closely.

On the Xen HVM side:

> diff -r 9bf6b4030d70 xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c	Wed Oct 27 21:55:45 2010 +0800
> +++ b/xen/arch/x86/hvm/hvm.c	Wed Oct 27 22:17:24 2010 +0800
> @@ -575,8 +575,13 @@ static int hvm_save_cpu_ctxt(struct doma
>          vc = &v->arch.guest_context;
>  
>          if ( v->fpu_initialised )
> -            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
> -        else 
> +            if ( cpu_has_xsave )
> +                /* to restore guest img saved on xsave-incapable host */
> +                memcpy(v->arch.xsave_area, ctxt.fpu_regs,
> +                       sizeof(ctxt.fpu_regs));
> +            else
> +                memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));

I think this hunk belongs in hvm_LOAD_cpu_ctxt()! 

> +        else
>              memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
>  
>          ctxt.rax = vc->user_regs.eax;

[...]

> +    ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
> +    h->cur += desc->length;
> +
> +    _xfeature_mask = ctxt->xfeature_mask;
> +    if ( (_xfeature_mask & xfeature_mask) != xfeature_mask )
> +        return -EINVAL;

This allows XSAVE records to be loaded on machines with fewer features.
Is that safe?
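
Shouldn't this rather require the saved feature set to be a subset of
the host's, as the PV path in this patch already does?  Sketch only:

    /* Require the saved feature set to be a subset of the host's. */
    if ( (ctxt->xfeature_mask & xfeature_mask) != ctxt->xfeature_mask )
        return -EINVAL;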

> +    v->arch.xcr0 = ctxt->xcr0;
> +    v->arch.xcr0_accum = ctxt->xcr0_accum;
> +    memcpy(v->arch.xsave_area, &ctxt->save_area, xsave_cntxt_size);
> +
> +    return 0;
> +}

Also, have you tested this on CPUs that don't support XSAVE?  The PV
hypercall looks like it will return -EFAULT after trying to
copy_from_user into a null pointer on the Xen side, but something more
explicit would be better.
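
An explicit guard at the top of the domctl handler would do; a minimal
sketch, assuming -EINVAL is an acceptable error code here:

    if ( !cpu_has_xsave )
    {
        ret = -EINVAL;
        break;
    }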

Cheers,

Tim.

-- 
Tim Deegan <Tim.Deegan@citrix.com>
Principal Software Engineer, XenServer Engineering
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)


* Re: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-27  7:05 [Patch 4/4] Refining Xsave/Xrestore support Haitao Shan
  2010-10-27 10:25 ` Tim Deegan
@ 2010-10-27 10:39 ` Jan Beulich
  2010-10-28  2:52   ` Haitao Shan
  1 sibling, 1 reply; 11+ messages in thread
From: Jan Beulich @ 2010-10-27 10:39 UTC (permalink / raw)
  To: Haitao Shan; +Cc: xen-devel, Weidong Han, Keir Fraser

>@@ -189,7 +189,8 @@ static int uncanonicalize_pagetable(
> /* Load the p2m frame list, plus potential extended info chunk */
> static xen_pfn_t *load_p2m_frame_list(
>     xc_interface *xch, struct restore_ctx *ctx,
>-    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
>+    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext,
>+    int *vcpuextstate, uint64_t *vcpuextstate_size)

What value is there in having vcpuextstate_size (here and elsewhere in
the patch) be a 64-bit quantity? In 32-bit tools exceeding 4G here
wouldn't work anyway, and iirc the value really can't exceed 32 bits.

>@@ -781,6 +781,31 @@ struct xen_domctl_mem_sharing_op {
> typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
> DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
> 
>+/* XEN_DOMCTL_setvcpuextstate */
>+/* XEN_DOMCTL_getvcpuextstate */
>+struct xen_domctl_vcpuextstate {
>+    /* IN: VCPU that this call applies to. */
>+    uint32_t         vcpu;
>+    /*
>+     * SET: xfeature support mask of struct (IN)
>+     * GET: xfeature support mask of struct (IN/OUT)
>+     * The xfeature mask identifies the format of the saved state,
>+     * so that a compatible CPU can check the format and decide
>+     * whether it can restore it.
>+     */
>+    uint64_t         xfeature_mask;

uint64_aligned_t.

>+    /*
>+     * SET: Size of struct (IN)
>+     * GET: Size of struct (IN/OUT)
>+     */
>+    uint64_t         size;

Here too.
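
That is, following the convention of the existing x86 domctl
structures, a sketch of the two declarations:

    uint64_aligned_t xfeature_mask;
    uint64_aligned_t size;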

>+#if defined(__i386__) || defined(__x86_64__)

Why? The structure makes no sense without the following field, so
either the whole structure is x86-specific, or the field is generic as
is the rest of the structure.

>+    XEN_GUEST_HANDLE_64(uint64) buffer;
>+#endif
>+};

Jan


* Fwd: [Patch 4/4] Refining Xsave/Xrestore support
       [not found]   ` <AANLkTi=1fW3rQL+8SRAzYv3D6Lqo2PGC7uYzd5VkX8hw@mail.gmail.com>
@ 2010-10-28  2:32     ` Haitao Shan
  2010-10-28  9:18       ` Tim Deegan
  0 siblings, 1 reply; 11+ messages in thread
From: Haitao Shan @ 2010-10-28  2:32 UTC (permalink / raw)
  To: xen-devel, Keir Fraser, Jan Beulich

Sorry, I forgot to reply to all in my reply to Tim Deegan.

Shan Haitao


---------- Forwarded message ----------
From: Haitao Shan <maillists.shan@gmail.com>
Date: 2010/10/28
Subject: Re: [Xen-devel] [Patch 4/4] Refining Xsave/Xrestore support
To: Tim Deegan <Tim.Deegan@citrix.com>


Hi, Deegan,

Thanks for your review. Please see my reply embedded.

2010/10/27 Tim Deegan <Tim.Deegan@citrix.com>:
> Hi,
>
> Thanks for this - good to see XSAVE save/restore working.  I've no
> comments on the tools part of this patch; it looks plausible but I
> haven't reviewed it closely.
>
> On the Xen HVM side:
>
>> diff -r 9bf6b4030d70 xen/arch/x86/hvm/hvm.c
>> --- a/xen/arch/x86/hvm/hvm.c  Wed Oct 27 21:55:45 2010 +0800
>> +++ b/xen/arch/x86/hvm/hvm.c  Wed Oct 27 22:17:24 2010 +0800
>> @@ -575,8 +575,13 @@ static int hvm_save_cpu_ctxt(struct doma
>>          vc = &v->arch.guest_context;
>>
>>          if ( v->fpu_initialised )
>> -            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
>> -        else
>> +            if ( cpu_has_xsave )
>> +                /* to restore guest img saved on xsave-incapable host */
>> +                memcpy(v->arch.xsave_area, ctxt.fpu_regs,
>> +                       sizeof(ctxt.fpu_regs));
>> +            else
>> +                memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>
> I think this hunk belongs in hvm_LOAD_cpu_ctxt()!
I originally did exactly that. But doing this in hvm_load_cpu_ctxt
depends on two things:
1. hvm_load_cpu_ctxt must not run before the xsave restore routine;
otherwise, xsave_area contains no useful data at the time of the copy.
2. It seems to break restore when an HVM guest (one that never touches
the eXtended States at all) is saved on an XSAVE-capable host and later
restored on an XSAVE-incapable host.
So I moved this code to hvm_save_cpu_ctxt, so that the FPU context
inside guest save images is consistent.

>
>> +        else
>>              memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
>>
>>          ctxt.rax = vc->user_regs.eax;
>
> [...]
>
>> +    ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
>> +    h->cur += desc->length;
>> +
>> +    _xfeature_mask = ctxt->xfeature_mask;
>> +    if ( (_xfeature_mask & xfeature_mask) != xfeature_mask )
>> +        return -EINVAL;
>
> This allows XSAVE records to be loaded on machines with fewer features.
> Is that safe?
Oh, my mistake. I will fix that.

>
>> +    v->arch.xcr0 = ctxt->xcr0;
>> +    v->arch.xcr0_accum = ctxt->xcr0_accum;
>> +    memcpy(v->arch.xsave_area, &ctxt->save_area, xsave_cntxt_size);
>> +
>> +    return 0;
>> +}
>
> Also, have you tested this on CPUs that don't support XSAVE?  The PV
> hypercall looks like it will return -EFAULT after trying to
> copy_from_user into a null pointer on the Xen side, but something more
> explicit would be better.
Sure. I will add that in my updated patch.

>
> Cheers,
>
> Tim.
>
> --
> Tim Deegan <Tim.Deegan@citrix.com>
> Principal Software Engineer, XenServer Engineering
> Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)
>


* Re: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-27 10:39 ` Jan Beulich
@ 2010-10-28  2:52   ` Haitao Shan
  2010-10-28  7:21     ` Jan Beulich
  0 siblings, 1 reply; 11+ messages in thread
From: Haitao Shan @ 2010-10-28  2:52 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Weidong Han, Keir Fraser

Hi, Jan,

Thanks for reviewing. I am really not good at coding. :)
Please see my comments embedded.

2010/10/27 Jan Beulich <JBeulich@novell.com>:
>>@@ -189,7 +189,8 @@ static int uncanonicalize_pagetable(
>> /* Load the p2m frame list, plus potential extended info chunk */
>> static xen_pfn_t *load_p2m_frame_list(
>>     xc_interface *xch, struct restore_ctx *ctx,
>>-    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
>>+    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext,
>>+    int *vcpuextstate, uint64_t *vcpuextstate_size)
>
> What value is there in having vcpuextstate_size (here and elsewhere in
> the patch) be a 64-bit quantity? In 32-bit tools exceeding 4G here
> wouldn't work anyway, and iirc the value really can't exceed 32 bits.
Yes. Using 64 bits is my preference whenever I cannot guarantee the size
stays below 4G. The XSAVE area itself is at most 4G, since its size is
reported in ECX. :) However, I currently have two more registers to save
on top of that (and maybe more XCRx registers in the future). So while
it is unlikely to reach the 4G bound in real life, it is not strictly
guaranteed.
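
For reference, the 4G bound on the area itself comes from CPUID leaf
0xD, sub-leaf 0, which reports in ECX the maximum XSAVE area size in
bytes over all features the processor supports, so that part is
inherently a 32-bit quantity. A minimal sketch using Xen's cpuid_count
helper:

    unsigned int eax, ebx, ecx, edx;

    cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
    /* ecx == maximum XSAVE buffer size over all supported features */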

>
>>@@ -781,6 +781,31 @@ struct xen_domctl_mem_sharing_op {
>> typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
>> DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
>>
>>+/* XEN_DOMCTL_setvcpuextstate */
>>+/* XEN_DOMCTL_getvcpuextstate */
>>+struct xen_domctl_vcpuextstate {
>>+    /* IN: VCPU that this call applies to. */
>>+    uint32_t         vcpu;
>>+    /*
>>+     * SET: xfeature support mask of struct (IN)
>>+     * GET: xfeature support mask of struct (IN/OUT)
>>+     * xfeature mask is served as identifications of the saving format
>>+     * so that compatible CPUs can have a check on format to decide
>>+     * whether it can restore.
>>+     */
>>+    uint64_t         xfeature_mask;
>
> uint64_aligned_t.
>
>>+    /*
>>+     * SET: Size of struct (IN)
>>+     * GET: Size of struct (IN/OUT)
>>+     */
>>+    uint64_t         size;
>
> Here too.
I will add that in my updated patch.

>
>>+#if defined(__i386__) || defined(__x86_64__)
>
> Why? The structure makes no sense without the following field, so
> either the whole structure is x86-specific, or the field is generic as
> is the rest of the structure.
>
>>+    XEN_GUEST_HANDLE_64(uint64) buffer;
>>+#endif
>>+};
I modelled my hypercall on another hypercall, which is also
x86-specific. Though it feels somewhat ugly, I just followed the
existing coding style. I will include the whole structure:

/* XEN_DOMCTL_set_ext_vcpucontext */
/* XEN_DOMCTL_get_ext_vcpucontext */
struct xen_domctl_ext_vcpucontext {
    /* IN: VCPU that this call applies to. */
    uint32_t         vcpu;
    /*
     * SET: Size of struct (IN)
     * GET: Size of struct (OUT)
     */
    uint32_t         size;
#if defined(__i386__) || defined(__x86_64__)
    /* SYSCALL from 32-bit mode and SYSENTER callback information. */
    /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */
    uint64_aligned_t syscall32_callback_eip;
    uint64_aligned_t sysenter_callback_eip;
    uint16_t         syscall32_callback_cs;
    uint16_t         sysenter_callback_cs;
    uint8_t          syscall32_disables_events;
    uint8_t          sysenter_disables_events;
#endif


>
> Jan
>
>


* Re: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-27 10:25 ` Tim Deegan
       [not found]   ` <AANLkTi=1fW3rQL+8SRAzYv3D6Lqo2PGC7uYzd5VkX8hw@mail.gmail.com>
@ 2010-10-28  4:57   ` Haitao Shan
  1 sibling, 0 replies; 11+ messages in thread
From: Haitao Shan @ 2010-10-28  4:57 UTC (permalink / raw)
  To: Tim Deegan, Jan Beulich; +Cc: xen-devel, Han, Weidong, Keir Fraser

[-- Attachment #1: Type: text/plain, Size: 2229 bytes --]

This is the updated patch. Thanks!

Shan Haitao

2010/10/27 Tim Deegan <Tim.Deegan@citrix.com>:
> Hi,
>
> Thanks for this - good to see XSAVE save/restore working.  I've no
> comments on the tools part of this patch; it looks plausible but I
> haven't reviewed it closely.
>
> On the Xen HVM side:
>
>> diff -r 9bf6b4030d70 xen/arch/x86/hvm/hvm.c
>> --- a/xen/arch/x86/hvm/hvm.c  Wed Oct 27 21:55:45 2010 +0800
>> +++ b/xen/arch/x86/hvm/hvm.c  Wed Oct 27 22:17:24 2010 +0800
>> @@ -575,8 +575,13 @@ static int hvm_save_cpu_ctxt(struct doma
>>          vc = &v->arch.guest_context;
>>
>>          if ( v->fpu_initialised )
>> -            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
>> -        else
>> +            if ( cpu_has_xsave )
>> +                /* to restore guest img saved on xsave-incapable host */
>> +                memcpy(v->arch.xsave_area, ctxt.fpu_regs,
>> +                       sizeof(ctxt.fpu_regs));
>> +            else
>> +                memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>
> I think this hunk belongs in hvm_LOAD_cpu_ctxt()!
>
>> +        else
>>              memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
>>
>>          ctxt.rax = vc->user_regs.eax;
>
> [...]
>
>> +    ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
>> +    h->cur += desc->length;
>> +
>> +    _xfeature_mask = ctxt->xfeature_mask;
>> +    if ( (_xfeature_mask & xfeature_mask) != xfeature_mask )
>> +        return -EINVAL;
>
> This allows XSAVE records to be loaded on machines with fewer features.
> Is that safe?
>
>> +    v->arch.xcr0 = ctxt->xcr0;
>> +    v->arch.xcr0_accum = ctxt->xcr0_accum;
>> +    memcpy(v->arch.xsave_area, &ctxt->save_area, xsave_cntxt_size);
>> +
>> +    return 0;
>> +}
>
> Also, have you tested this on CPUs that don't support XSAVE?  The PV
> hypercall looks like it will return -EFAULT after trying to
> copy_from_user into a null pointer on the Xen side, but something more
> explicit would be better.
>
> Cheers,
>
> Tim.
>
> --
> Tim Deegan <Tim.Deegan@citrix.com>
> Principal Software Engineer, XenServer Engineering
> Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)
>

[-- Attachment #2: xsave-migration.patch --]
[-- Type: application/octet-stream, Size: 26847 bytes --]

diff -r f3a56ede6a3e tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Thu Oct 28 19:21:16 2010 +0800
+++ b/tools/libxc/xc_domain_restore.c	Thu Oct 28 20:56:56 2010 +0800
@@ -189,7 +189,8 @@ static int uncanonicalize_pagetable(
 /* Load the p2m frame list, plus potential extended info chunk */
 static xen_pfn_t *load_p2m_frame_list(
     xc_interface *xch, struct restore_ctx *ctx,
-    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
+    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext,
+    int *vcpuextstate, uint64_t *vcpuextstate_size)
 {
     xen_pfn_t *p2m_frame_list;
     vcpu_guest_context_any_t ctxt;
@@ -266,6 +267,13 @@ static xen_pfn_t *load_p2m_frame_list(
             {
                 *ext_vcpucontext = 1;
             }
+            else if ( !strncmp(chunk_sig, "xcnt", 4) )
+            {
+                *vcpuextstate = 1;
+                RDEXACT(io_fd, vcpuextstate_size, sizeof(*vcpuextstate_size));
+                tot_bytes -= chunk_bytes;
+                chunk_bytes = 0;
+            }
             
             /* Any remaining bytes of this chunk: read and discard. */
             while ( chunk_bytes )
@@ -449,7 +457,8 @@ static int dump_qemu(xc_interface *xch, 
 static int buffer_tail_hvm(xc_interface *xch, struct restore_ctx *ctx,
                            struct tailbuf_hvm *buf, int fd,
                            unsigned int max_vcpu_id, uint64_t vcpumap,
-                           int ext_vcpucontext)
+                           int ext_vcpucontext,
+                           int vcpuextstate, uint64_t vcpuextstate_size)
 {
     uint8_t *tmp;
     unsigned char qemusig[21];
@@ -511,7 +520,9 @@ static int buffer_tail_hvm(xc_interface 
 static int buffer_tail_pv(xc_interface *xch, struct restore_ctx *ctx,
                           struct tailbuf_pv *buf, int fd,
                           unsigned int max_vcpu_id, uint64_t vcpumap,
-                          int ext_vcpucontext)
+                          int ext_vcpucontext,
+                          int vcpuextstate,
+                          uint64_t vcpuextstate_size)
 {
     unsigned int i;
     size_t pfnlen, vcpulen;
@@ -551,6 +562,9 @@ static int buffer_tail_pv(xc_interface *
                : sizeof(vcpu_guest_context_x86_32_t)) * buf->vcpucount;
     if ( ext_vcpucontext )
         vcpulen += 128 * buf->vcpucount;
+    if ( vcpuextstate ) {
+        vcpulen += vcpuextstate_size * buf->vcpucount;
+    }
 
     if ( !(buf->vcpubuf) ) {
         if ( !(buf->vcpubuf = malloc(vcpulen)) ) {
@@ -589,14 +603,17 @@ static int buffer_tail_pv(xc_interface *
 
 static int buffer_tail(xc_interface *xch, struct restore_ctx *ctx,
                        tailbuf_t *buf, int fd, unsigned int max_vcpu_id,
-                       uint64_t vcpumap, int ext_vcpucontext)
+                       uint64_t vcpumap, int ext_vcpucontext,
+                       int vcpuextstate, uint64_t vcpuextstate_size)
 {
     if ( buf->ishvm )
         return buffer_tail_hvm(xch, ctx, &buf->u.hvm, fd, max_vcpu_id, vcpumap,
-                               ext_vcpucontext);
+                               ext_vcpucontext, vcpuextstate,
+                               vcpuextstate_size);
     else
         return buffer_tail_pv(xch, ctx, &buf->u.pv, fd, max_vcpu_id, vcpumap,
-                              ext_vcpucontext);
+                              ext_vcpucontext, vcpuextstate,
+                              vcpuextstate_size);
 }
 
 static void tailbuf_free_hvm(struct tailbuf_hvm *buf)
@@ -1051,6 +1068,8 @@ int xc_domain_restore(xc_interface *xch,
 {
     DECLARE_DOMCTL;
     int rc = 1, frc, i, j, n, m, pae_extended_cr3 = 0, ext_vcpucontext = 0;
+    int vcpuextstate = 0;
+    uint64_t vcpuextstate_size = 0;
     unsigned long mfn, pfn;
     unsigned int prev_pc;
     int nraces = 0;
@@ -1065,6 +1084,9 @@ int xc_domain_restore(xc_interface *xch,
     /* A copy of the CPU context of the guest. */
     DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
 
+    /* A copy of the CPU eXtended States of the guest. */
+    DECLARE_HYPERCALL_BUFFER(void, buffer);
+
     /* A table containing the type of each PFN (/not/ MFN!). */
     unsigned long *pfn_type = NULL;
 
@@ -1151,7 +1173,9 @@ int xc_domain_restore(xc_interface *xch,
     {
         /* Load the p2m frame list, plus potential extended info chunk */
         p2m_frame_list = load_p2m_frame_list(xch, ctx,
-            io_fd, &pae_extended_cr3, &ext_vcpucontext);
+            io_fd, &pae_extended_cr3, &ext_vcpucontext,
+            &vcpuextstate, &vcpuextstate_size);
+
         if ( !p2m_frame_list )
             goto out;
 
@@ -1298,10 +1322,11 @@ int xc_domain_restore(xc_interface *xch,
     if ( !ctx->completed ) {
 
         if ( buffer_tail(xch, ctx, &tailbuf, io_fd, max_vcpu_id, vcpumap,
-                         ext_vcpucontext) < 0 ) {
+                         ext_vcpucontext, vcpuextstate, vcpuextstate_size) < 0 ) {
             ERROR ("error buffering image tail");
             goto out;
         }
+
         ctx->completed = 1;
 
         /*
@@ -1327,7 +1352,7 @@ int xc_domain_restore(xc_interface *xch,
     memset(&tmptail, 0, sizeof(tmptail));
     tmptail.ishvm = hvm;
     if ( buffer_tail(xch, ctx, &tmptail, io_fd, max_vcpu_id, vcpumap,
-                     ext_vcpucontext) < 0 ) {
+                     ext_vcpucontext, vcpuextstate, vcpuextstate_size) < 0 ) {
         ERROR ("error buffering image tail, finishing");
         goto finish;
     }
@@ -1648,7 +1673,7 @@ int xc_domain_restore(xc_interface *xch,
         }
 
         if ( !ext_vcpucontext )
-            continue;
+            goto vcpu_ext_state_restore;
         memcpy(&domctl.u.ext_vcpucontext, vcpup, 128);
         vcpup += 128;
         domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
@@ -1659,6 +1684,39 @@ int xc_domain_restore(xc_interface *xch,
             PERROR("Couldn't set extended vcpu%d info", i);
             goto out;
         }
+
+ vcpu_ext_state_restore:
+        if ( !vcpuextstate )
+            continue;
+
+        memcpy(&domctl.u.vcpuextstate.xfeature_mask, vcpup,
+               sizeof(domctl.u.vcpuextstate.xfeature_mask));
+        vcpup += sizeof(domctl.u.vcpuextstate.xfeature_mask);
+        memcpy(&domctl.u.vcpuextstate.size, vcpup,
+               sizeof(domctl.u.vcpuextstate.size));
+        vcpup += sizeof(domctl.u.vcpuextstate.size);
+
+        buffer = xc_hypercall_buffer_alloc(xch, buffer,
+                                           domctl.u.vcpuextstate.size);
+        if ( !buffer )
+        {
+            PERROR("Could not allocate buffer to restore eXtended States");
+            goto out;
+        }
+        memcpy(buffer, vcpup, domctl.u.vcpuextstate.size);
+        vcpup += domctl.u.vcpuextstate.size;
+
+        domctl.cmd = XEN_DOMCTL_setvcpuextstate;
+        domctl.domain = dom;
+        domctl.u.vcpuextstate.vcpu = i;
+        set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
+        frc = xc_domctl(xch, &domctl);
+        if ( frc != 0 )
+        {
+            PERROR("Couldn't set eXtended States for vcpu%d", i);
+            goto out;
+        }
+        xc_hypercall_buffer_free(xch, buffer);
     }
 
     memcpy(shared_info_page, tailbuf.u.pv.shared_info_page, PAGE_SIZE);
diff -r f3a56ede6a3e tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c	Thu Oct 28 19:21:16 2010 +0800
+++ b/tools/libxc/xc_domain_save.c	Thu Oct 28 20:56:56 2010 +0800
@@ -810,14 +810,34 @@ static xen_pfn_t *map_and_save_p2m_table
                               ? sizeof(ctxt.x64) 
                               : sizeof(ctxt.x32));
         uint32_t chunk2_sz = 0;
-        uint32_t tot_sz    = (chunk1_sz + 8) + (chunk2_sz + 8);
+        uint32_t chunk3_sz = 8;
+        uint32_t tot_sz;
+        DECLARE_DOMCTL;
+
+        domctl.cmd = XEN_DOMCTL_getvcpuextstate;
+        domctl.domain = dom;
+        domctl.u.vcpuextstate.vcpu = 0;
+        domctl.u.vcpuextstate.size = 0;
+        domctl.u.vcpuextstate.xfeature_mask = 0;
+        if ( xc_domctl(xch, &domctl) < 0 )
+        {
+            PERROR("No extended context for VCPU%d", i);
+            goto out;
+        }
+
+        tot_sz = (chunk1_sz + 8) + (chunk2_sz + 8) + (chunk3_sz + 8);
+
         if ( write_exact(io_fd, &signature, sizeof(signature)) ||
              write_exact(io_fd, &tot_sz, sizeof(tot_sz)) ||
              write_exact(io_fd, "vcpu", 4) ||
              write_exact(io_fd, &chunk1_sz, sizeof(chunk1_sz)) ||
              write_exact(io_fd, &ctxt, chunk1_sz) ||
              write_exact(io_fd, "extv", 4) ||
-             write_exact(io_fd, &chunk2_sz, sizeof(chunk2_sz)) )
+             write_exact(io_fd, &chunk2_sz, sizeof(chunk2_sz)) ||
+             write_exact(io_fd, "xcnt", 4) ||
+             write_exact(io_fd, &chunk3_sz, sizeof(chunk3_sz)) ||
+             write_exact(io_fd, &domctl.u.vcpuextstate.size,
+                         sizeof(domctl.u.vcpuextstate.size)) )
         {
             PERROR("write: extended info");
             goto out;
@@ -905,6 +925,9 @@ int xc_domain_save(xc_interface *xch, in
     /* base of the region in which domain memory is mapped */
     unsigned char *region_base = NULL;
 
+    /* A copy of the CPU eXtended States of the guest. */
+    DECLARE_HYPERCALL_BUFFER(void, buffer);
+
     /* bitmap of pages:
        - that should be sent this iteration (unless later marked as skip);
        - to skip this iteration because already dirty;
@@ -1786,6 +1809,53 @@ int xc_domain_save(xc_interface *xch, in
             PERROR("Error when writing to state file (2)");
             goto out;
         }
+
+        /* Start to fetch CPU eXtended States */
+        /* Get buffer size first */
+        domctl.cmd = XEN_DOMCTL_getvcpuextstate;
+        domctl.domain = dom;
+        domctl.u.vcpuextstate.vcpu = i;
+        domctl.u.vcpuextstate.xfeature_mask = 0;
+        domctl.u.vcpuextstate.size = 0;
+        if ( xc_domctl(xch, &domctl) < 0 )
+        {
+            PERROR("No eXtended states (XSAVE) for VCPU%d", i);
+            goto out;
+        }
+
+        /* Getting eXtended states data */
+        buffer = xc_hypercall_buffer_alloc(xch, buffer, domctl.u.vcpuextstate.size);
+        if ( !buffer )
+        {
+            PERROR("Insufficient memory for getting eXtended states for"
+                   "VCPU%d", i);
+            goto out;
+        }
+        set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
+        if ( xc_domctl(xch, &domctl) < 0 )
+        {
+            PERROR("No eXtended states (XSAVE) for VCPU%d", i);
+            goto out;
+        }
+
+        if ( wrexact(io_fd, &domctl.u.vcpuextstate.xfeature_mask,
+                     sizeof(domctl.u.vcpuextstate.xfeature_mask)) )
+        {
+            PERROR("Error when writing to state file (2)");
+            goto out;
+        }
+        if ( wrexact(io_fd, &domctl.u.vcpuextstate.size,
+                     sizeof(domctl.u.vcpuextstate.size)) )
+        {
+            PERROR("Error when writing to state file (2)");
+            goto out;
+        }
+        if ( wrexact(io_fd, buffer, domctl.u.vcpuextstate.size) )
+        {
+            PERROR("Error when writing to state file (2)");
+            goto out;
+        }
+        xc_hypercall_buffer_free(xch, buffer);
     }
 
     /*
diff -r f3a56ede6a3e xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Thu Oct 28 19:21:16 2010 +0800
+++ b/xen/arch/x86/domctl.c	Thu Oct 28 20:56:56 2010 +0800
@@ -33,6 +33,7 @@
 #include <asm/mem_event.h>
 #include <public/mem_event.h>
 #include <asm/mem_sharing.h>
+#include <asm/i387.h>
 
 #ifdef XEN_KDB_CONFIG
 #include "../kdb/include/kdbdefs.h"
@@ -1406,6 +1407,128 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_setvcpuextstate:
+    case XEN_DOMCTL_getvcpuextstate:
+    {
+        struct xen_domctl_vcpuextstate *evc;
+        struct domain *d;
+        struct vcpu *v;
+        uint32_t offset = 0;
+        uint64_t _xfeature_mask;
+
+#define PV_XSAVE_SIZE (2 * sizeof(uint64_t) + xsave_cntxt_size)
+
+        evc = &domctl->u.vcpuextstate;
+
+        ret = -ESRCH;
+
+        if ( !cpu_has_xsave )
+            break;
+
+        d = rcu_lock_domain_by_id(domctl->domain);
+        if ( d == NULL )
+            break;
+
+        ret = xsm_vcpuextstate(d, domctl->cmd);
+        if ( ret )
+            goto vcpuextstate_out;
+
+        ret = -ESRCH;
+        if ( (evc->vcpu >= d->max_vcpus) ||
+             ((v = d->vcpu[evc->vcpu]) == NULL) )
+            goto vcpuextstate_out;
+
+        if ( domctl->cmd == XEN_DOMCTL_getvcpuextstate )
+        {
+            if ( !evc->size && !evc->xfeature_mask )
+            {
+                evc->xfeature_mask = xfeature_mask;
+                evc->size = PV_XSAVE_SIZE;
+                ret = 0;
+                goto vcpuextstate_out;
+            }
+            if ( evc->size != PV_XSAVE_SIZE ||
+                 evc->xfeature_mask != xfeature_mask )
+            {
+                ret = EFAULT;
+                goto vcpuextstate_out;
+            }
+            if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+                                      offset, (void *)&v->arch.xcr0,
+                                      sizeof(v->arch.xcr0)) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+            offset += sizeof(v->arch.xcr0);
+            if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+                                      offset, (void *)&v->arch.xcr0_accum,
+                                      sizeof(v->arch.xcr0_accum)) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+            offset += sizeof(v->arch.xcr0_accum);
+            if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+                                      offset, v->arch.xsave_area,
+                                      xsave_cntxt_size) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+        }
+        else
+        {
+            ret = -EINVAL;
+
+            _xfeature_mask = evc->xfeature_mask;
+            /* xsave context must be restored on compatible target CPUs */
+            if ( (_xfeature_mask & xfeature_mask) != _xfeature_mask )
+                goto vcpuextstate_out;
+            if ( evc->size > PV_XSAVE_SIZE )
+                goto vcpuextstate_out;
+
+            if ( copy_from_guest_offset((void *)&v->arch.xcr0,
+                                        domctl->u.vcpuextstate.buffer, offset,
+                                        sizeof(v->arch.xcr0)) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+            if ( !(v->arch.xcr0 & XSTATE_FP) ||
+                 (v->arch.xcr0 & ~xfeature_mask) )
+            {
+                ret = -EINVAL;
+                goto vcpuextstate_out;
+            }
+            offset += sizeof(v->arch.xcr0);
+            if ( copy_from_guest_offset((void *)&v->arch.xcr0_accum,
+                                        domctl->u.vcpuextstate.buffer, offset,
+                                        sizeof(v->arch.xcr0_accum)) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+            offset += sizeof(v->arch.xcr0_accum);
+            if ( copy_from_guest_offset(v->arch.xsave_area,
+                                        domctl->u.vcpuextstate.buffer, offset,
+                                        xsave_cntxt_size) )
+            {
+                ret = -EFAULT;
+                goto vcpuextstate_out;
+            }
+        }
+
+        ret = 0;
+
+    vcpuextstate_out:
+        rcu_unlock_domain(d);
+        if ( (domctl->cmd == XEN_DOMCTL_getvcpuextstate) &&
+             copy_to_guest(u_domctl, domctl, 1) )
+            ret = -EFAULT;
+    }
+    break;
+
 #ifdef __x86_64__
     case XEN_DOMCTL_mem_event_op:
     {
@@ -1455,6 +1578,11 @@ void arch_get_info_guest(struct vcpu *v,
 #define c(fld) (c.nat->fld)
 #endif
 
+    /* Fill legacy context from xsave area first */
+    if ( cpu_has_xsave )
+        memcpy(&v->arch.guest_context.fpu_ctxt, v->arch.xsave_area,
+               sizeof(v->arch.guest_context.fpu_ctxt));
+
     if ( !is_pv_32on64_domain(v->domain) )
         memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
 #ifdef CONFIG_COMPAT
diff -r f3a56ede6a3e xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Thu Oct 28 19:21:16 2010 +0800
+++ b/xen/arch/x86/hvm/hvm.c	Thu Oct 28 20:56:56 2010 +0800
@@ -575,8 +575,13 @@ static int hvm_save_cpu_ctxt(struct doma
         vc = &v->arch.guest_context;
 
         if ( v->fpu_initialised )
-            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
-        else 
+            if ( cpu_has_xsave )
+                /* save legacy FPU state from the xsave area */
+                memcpy(ctxt.fpu_regs, v->arch.xsave_area,
+                       sizeof(ctxt.fpu_regs));
+            else
+                memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
+        else
             memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
 
         ctxt.rax = vc->user_regs.eax;
@@ -799,6 +804,113 @@ static int hvm_load_cpu_ctxt(struct doma
 HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                           1, HVMSR_PER_VCPU);
 
+#define HVM_CPU_XSAVE_SIZE  (3 * sizeof(uint64_t) + xsave_cntxt_size)
+
+static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+    struct hvm_hw_cpu_xsave *ctxt;
+
+    if ( !cpu_has_xsave )
+        return 0;   /* do nothing */
+
+    for_each_vcpu ( d, v )
+    {
+        if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, HVM_CPU_XSAVE_SIZE) )
+            return 1;
+        ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+        h->cur += HVM_CPU_XSAVE_SIZE;
+        memset(ctxt, 0, HVM_CPU_XSAVE_SIZE);
+
+        ctxt->xfeature_mask = xfeature_mask;
+        ctxt->xcr0 = v->arch.xcr0;
+        ctxt->xcr0_accum = v->arch.xcr0_accum;
+        if ( v->fpu_initialised )
+            memcpy(&ctxt->save_area,
+                   v->arch.xsave_area, xsave_cntxt_size);
+    }
+
+    return 0;
+}
+
+static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+{
+    int vcpuid;
+    struct vcpu *v;
+    struct hvm_hw_cpu_xsave *ctxt;
+    struct hvm_save_descriptor *desc;
+    uint64_t _xfeature_mask;
+
+    /* Fail: an image saved on an xsave-capable host cannot be
+     * restored on this xsave-incapable one. */
+    if ( !cpu_has_xsave )
+        return -EINVAL;
+
+    /* Which vcpu is this? */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
+        return -EINVAL;
+    }
+
+    /* Customized checking for the entry, since it is of variable length */
+    desc = (struct hvm_save_descriptor *)&h->data[h->cur];
+    if ( sizeof(*desc) > h->size - h->cur )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "HVM restore: not enough data left to read descriptor "
+                 "for type %u\n", CPU_XSAVE_CODE);
+        return -1;
+    }
+    if ( desc->length + sizeof(*desc) > h->size - h->cur )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "HVM restore: not enough data left to read %u bytes "
+                 "for type %u\n", desc->length, CPU_XSAVE_CODE);
+        return -1;
+    }
+    if ( CPU_XSAVE_CODE != desc->typecode || (desc->length > HVM_CPU_XSAVE_SIZE) )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "HVM restore mismatch: expected type %u with max length %u, "
+                 "saw type %u length %u\n", CPU_XSAVE_CODE,
+                 (uint32_t)HVM_CPU_XSAVE_SIZE,
+                 desc->typecode, desc->length);
+        return -1;
+    }
+    h->cur += sizeof(*desc);
+    /* Checking finished */
+
+    ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+    h->cur += desc->length;
+
+    _xfeature_mask = ctxt->xfeature_mask;
+    if ( (_xfeature_mask & xfeature_mask) != _xfeature_mask )
+        return -EINVAL;
+
+    v->arch.xcr0 = ctxt->xcr0;
+    v->arch.xcr0_accum = ctxt->xcr0_accum;
+    memcpy(v->arch.xsave_area, &ctxt->save_area, xsave_cntxt_size);
+
+    return 0;
+}
+
+/* We need a variable-length data chunk for the xsave area, hence a
+ * customized registration rather than HVM_REGISTER_SAVE_RESTORE.
+ */
+static int __hvm_register_CPU_XSAVE_save_and_restore(void)
+{
+    hvm_register_savevm(CPU_XSAVE_CODE,
+                        "CPU_XSAVE",
+                        hvm_save_cpu_xsave_states,
+                        hvm_load_cpu_xsave_states,
+                        HVM_CPU_XSAVE_SIZE + sizeof (struct hvm_save_descriptor),
+                        HVMSR_PER_VCPU);
+    return 0;
+}
+__initcall(__hvm_register_CPU_XSAVE_save_and_restore);
+
 int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
diff -r f3a56ede6a3e xen/include/public/arch-x86/hvm/save.h
--- a/xen/include/public/arch-x86/hvm/save.h	Thu Oct 28 19:21:16 2010 +0800
+++ b/xen/include/public/arch-x86/hvm/save.h	Thu Oct 28 20:56:56 2010 +0800
@@ -431,9 +431,32 @@ struct hvm_viridian_context {
 
 DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context);
 
+
+/*
+ * The save area of XSAVE/XRSTOR.
+ */
+
+struct hvm_hw_cpu_xsave {
+    uint64_t xfeature_mask;
+    uint64_t xcr0;                 /* Updated by XSETBV */
+    uint64_t xcr0_accum;           /* Updated by XSETBV */
+    struct {
+        struct { char x[512]; } fpu_sse;
+
+        struct {
+            uint64_t xstate_bv;         /* Updated by XRSTOR */
+            uint64_t reserved[7];
+        } xsave_hdr;                    /* The 64-byte header */
+
+        struct { char x[0]; } ymm;    /* YMM */
+    } save_area;
+} __attribute__((packed));
+
+#define CPU_XSAVE_CODE  16
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 15
+#define HVM_SAVE_CODE_MAX 16
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
diff -r f3a56ede6a3e xen/include/public/domctl.h
--- a/xen/include/public/domctl.h	Thu Oct 28 19:21:16 2010 +0800
+++ b/xen/include/public/domctl.h	Thu Oct 28 20:56:56 2010 +0800
@@ -781,6 +781,31 @@ struct xen_domctl_mem_sharing_op {
 typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
 
+#if defined(__i386__) || defined(__x86_64__)
+/* XEN_DOMCTL_setvcpuextstate */
+/* XEN_DOMCTL_getvcpuextstate */
+struct xen_domctl_vcpuextstate {
+    /* IN: VCPU that this call applies to. */
+    uint32_t         vcpu;
+    /*
+     * SET: xfeature support mask of struct (IN)
+     * GET: xfeature support mask of struct (IN/OUT)
+     * The xfeature mask identifies the save format, so that a
+     * compatible CPU can check the format and decide whether the
+     * state can be restored.
+     */
+    uint64_aligned_t         xfeature_mask;
+    /*
+     * SET: Size of struct (IN)
+     * GET: Size of struct (IN/OUT)
+     */
+    uint64_aligned_t         size;
+    XEN_GUEST_HANDLE_64(uint64) buffer;
+};
+typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t);
+#endif
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -841,6 +866,8 @@ struct xen_domctl {
 #define XEN_DOMCTL_gettscinfo                    59
 #define XEN_DOMCTL_settscinfo                    60
 #define XEN_DOMCTL_getpageframeinfo3             61
+#define XEN_DOMCTL_setvcpuextstate               62
+#define XEN_DOMCTL_getvcpuextstate               63
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -891,6 +918,7 @@ struct xen_domctl {
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
+        struct xen_domctl_vcpuextstate      vcpuextstate;
 #endif
         struct xen_domctl_gdbsx_memio       gdbsx_guest_memio;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
diff -r f3a56ede6a3e xen/include/xsm/xsm.h
--- a/xen/include/xsm/xsm.h	Thu Oct 28 19:21:16 2010 +0800
+++ b/xen/include/xsm/xsm.h	Thu Oct 28 20:56:56 2010 +0800
@@ -149,6 +149,7 @@ struct xsm_operations {
     int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
     int (*pin_mem_cacheattr) (struct domain *d);
     int (*ext_vcpucontext) (struct domain *d, uint32_t cmd);
+    int (*vcpuextstate) (struct domain *d, uint32_t cmd);
 #endif
 };
 
@@ -622,6 +623,10 @@ static inline int xsm_ext_vcpucontext(st
 {
     return xsm_call(ext_vcpucontext(d, cmd));
 }
+static inline int xsm_vcpuextstate(struct domain *d, uint32_t cmd)
+{
+    return xsm_call(vcpuextstate(d, cmd));
+}
 #endif /* CONFIG_X86 */
 
 #endif /* __XSM_H */
diff -r f3a56ede6a3e xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c	Thu Oct 28 19:21:16 2010 +0800
+++ b/xen/xsm/flask/hooks.c	Thu Oct 28 20:56:56 2010 +0800
@@ -1177,6 +1177,25 @@ static int flask_ext_vcpucontext (struct
 
     return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, perm);
 }
+
+static int flask_vcpuextstate (struct domain *d, uint32_t cmd)
+{
+    u32 perm;
+
+    switch ( cmd )
+    {
+    case XEN_DOMCTL_setvcpuextstate:
+        perm = DOMAIN__SETVCPUEXTSTATE;
+        break;
+    case XEN_DOMCTL_getvcpuextstate:
+        perm = DOMAIN__GETVCPUEXTSTATE;
+        break;
+    default:
+        return -EPERM;
+    }
+
+    return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, perm);
+}
 #endif
 
 static int io_has_perm(struct domain *d, char *name, unsigned long s, 
@@ -1328,6 +1347,7 @@ static struct xsm_operations flask_ops =
     .bind_pt_irq = flask_bind_pt_irq,
     .pin_mem_cacheattr = flask_pin_mem_cacheattr,
     .ext_vcpucontext = flask_ext_vcpucontext,
+    .vcpuextstate = flask_vcpuextstate,
 #endif
 };
 
diff -r f3a56ede6a3e xen/xsm/flask/include/av_permissions.h
--- a/xen/xsm/flask/include/av_permissions.h	Thu Oct 28 19:21:16 2010 +0800
+++ b/xen/xsm/flask/include/av_permissions.h	Thu Oct 28 20:56:56 2010 +0800
@@ -51,6 +51,8 @@
 #define DOMAIN__TRIGGER                           0x00800000UL
 #define DOMAIN__GETEXTVCPUCONTEXT                 0x01000000UL
 #define DOMAIN__SETEXTVCPUCONTEXT                 0x02000000UL
+#define DOMAIN__GETVCPUEXTSTATE                   0x04000000UL
+#define DOMAIN__SETVCPUEXTSTATE                   0x08000000UL
 
 #define HVM__SETHVMC                              0x00000001UL
 #define HVM__GETHVMC                              0x00000002UL
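
For reference, the GET side of the new domctl is a two-phase protocol:
a first call with size and xfeature_mask both zero returns the host's
mask and the required buffer size, and a second call with those values
echoed back fills the buffer. A rough tools-side sketch (hypothetical
caller; hypercall-buffer locking and bouncing elided, so this is
illustrative rather than a drop-in libxc function):

    struct xen_domctl domctl;
    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_getvcpuextstate;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuextstate.vcpu = vcpu;
    do_domctl(xch, &domctl);        /* phase 1: learn mask and size */
    /* ... allocate buf of domctl.u.vcpuextstate.size bytes; the mask
     * and size returned above are echoed back automatically, since
     * the hypervisor updated the struct in place ... */
    set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buf);
    do_domctl(xch, &domctl);        /* phase 2: fetch the state */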

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel


* Re: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-28  2:52   ` Haitao Shan
@ 2010-10-28  7:21     ` Jan Beulich
  2010-10-28  7:34       ` Haitao Shan
  0 siblings, 1 reply; 11+ messages in thread
From: Jan Beulich @ 2010-10-28  7:21 UTC (permalink / raw)
  To: Haitao Shan; +Cc: xen-devel, Weidong Han, Keir Fraser

>>> On 28.10.10 at 04:52, Haitao Shan <maillists.shan@gmail.com> wrote:
> 2010/10/27 Jan Beulich <JBeulich@novell.com>:
>>>@@ -189,7 +189,8 @@ static int uncanonicalize_pagetable(
>>> /* Load the p2m frame list, plus potential extended info chunk */
>>> static xen_pfn_t *load_p2m_frame_list(
>>>     xc_interface *xch, struct restore_ctx *ctx,
>>>-    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
>>>+    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext,
>>>+    int *vcpuextstate, uint64_t *vcpuextstate_size)
>>
>> What value is it to have vcpuextstate_size (here and elsewhere in
>> the patch) be a 64-bit quantity? In 32-bit tools exceeding 4G here
>> wouldn't work anyway, and iirc the value really can't exceed 32 bits
>> anyway.
> Yes. Using 64-bit is my preference when I cannot guarantee the size is
> below 4G. The size of XSAVE_AREA is 4G max since it is reported by
> ECX. :) However, I currently have two (maybe more XCRx in the future)
> registers to save. So........ But it is unlikely to reach the 4G bound
> in real life.

This would make sense only if the value later didn't get truncated.

And I don't think one could even theoretically expect the size to
get anywhere near 4G - what would the performance of the save/
restore instruction be in that case?
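
If the wire format did keep a 64-bit length, the 32-bit consumer would
still need an explicit narrowing check rather than a silent cast: a
hypothetical helper, for illustration only:

    /* Refuse any size that cannot be represented by the consumer. */
    static int narrow_size(uint64_t wire_size, uint32_t *out)
    {
        if ( wire_size > UINT32_MAX )
            return -EINVAL;
        *out = (uint32_t)wire_size;
        return 0;
    }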

Jan


* Re: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-28  7:21     ` Jan Beulich
@ 2010-10-28  7:34       ` Haitao Shan
  0 siblings, 0 replies; 11+ messages in thread
From: Haitao Shan @ 2010-10-28  7:34 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Weidong Han, Keir Fraser

Ah, good point! I will update the patch accordingly.

Shan Haitao

2010/10/28 Jan Beulich <JBeulich@novell.com>:
>>>> On 28.10.10 at 04:52, Haitao Shan <maillists.shan@gmail.com> wrote:
>> 2010/10/27 Jan Beulich <JBeulich@novell.com>:
>>>>@@ -189,7 +189,8 @@ static int uncanonicalize_pagetable(
>>>> /* Load the p2m frame list, plus potential extended info chunk */
>>>> static xen_pfn_t *load_p2m_frame_list(
>>>>     xc_interface *xch, struct restore_ctx *ctx,
>>>>-    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
>>>>+    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext,
>>>>+    int *vcpuextstate, uint64_t *vcpuextstate_size)
>>>
>>> What value is it to have vcpuextstate_size (here and elsewhere in
>>> the patch) be a 64-bit quantity? In 32-bit tools exceeding 4G here
>>> wouldn't work anyway, and iirc the value really can't exceed 32 bits
>>> anyway.
>> Yes. Using 64-bit is my preference when I cannot guarantee the size is
>> below 4G. The size of XSAVE_AREA is 4G max since it is reported by
>> ECX. :) However, I currently have two (maybe more XCRx in the future)
>> registers to save. So........ But it is unlikely to reach the 4G bound
>> in real life.
>
> This would make sense only if the value later didn't get truncated.
>
> And I don't think one could even theoretically expect the size to
> get anywhere near 4G - what would the performance of the save/
> restore instruction be in that case?
>
> Jan
>
>


* Re: Fwd: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-28  2:32     ` Fwd: " Haitao Shan
@ 2010-10-28  9:18       ` Tim Deegan
  2010-10-28 11:28         ` Haitao Shan
  0 siblings, 1 reply; 11+ messages in thread
From: Tim Deegan @ 2010-10-28  9:18 UTC (permalink / raw)
  To: Haitao Shan; +Cc: Keir, xen-devel, Fraser

Hi, 

At 03:32 +0100 on 28 Oct (1288236759), Haitao Shan wrote:
> >> diff -r 9bf6b4030d70 xen/arch/x86/hvm/hvm.c
> >> --- a/xen/arch/x86/hvm/hvm.c  Wed Oct 27 21:55:45 2010 +0800
> >> +++ b/xen/arch/x86/hvm/hvm.c  Wed Oct 27 22:17:24 2010 +0800
> >> @@ -575,8 +575,13 @@ static int hvm_save_cpu_ctxt(struct doma
> >>          vc = &v->arch.guest_context;
> >>
> >>          if ( v->fpu_initialised )
> >> -            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
> >> -        else
> >> +            if ( cpu_has_xsave )
> >> +                /* to restore guest img saved on xsave-incapable host */
> >> +                memcpy(v->arch.xsave_area, ctxt.fpu_regs,
> >> +                       sizeof(ctxt.fpu_regs));
> >> +            else
> >> +                memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
> >
> > I think this hunk belongs in hvm_LOAD_cpu_ctxt()!
> I once did the same as you said. But doing this in hvm_load_cpu_ctxt
> depends on two things:
> 1. hvm_load_cpu_ctxt cannot be executed before the xsave restore
> routine is executed. Otherwise, xsave_area contains no useful data at
> the time of copying.

OK; then you should copy the other way in the xsave load routine as
well.  Xsave load will always happen after the CPU load since save
records are always written in increasing order of type.

That way, if the save file has no xsave record, the new domain's xsave
state is initialized from the fpu record, and if it does then the fpu
state is initialized from the xsave record.  I think that's the
behaviour you want.
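
For concreteness, the copy-back in the xsave load routine could be as
small as the (illustrative, untested) lines below, assuming the legacy
FXSAVE image sits in the first 512 bytes of the xsave area:

    /* Keep the legacy FPU record coherent with the freshly loaded
     * xsave state, so the record loaded last wins either way. */
    memcpy(&v->arch.guest_context.fpu_ctxt, v->arch.xsave_area,
           sizeof(v->arch.guest_context.fpu_ctxt));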

In any case this is *definitely* wrong where it is because the memcpy
arguments are the wrong way round. :)

> 2. It seems to break restore when an HVM guest (not touching eXtended
> States at all) saved on an Xsave-capable host is later restored on an
> Xsave-incapable host.

That's not a safe thing to do anyway -- once you've told the guest (via
CPUID) that XSAVE is available you can't migrate it to a host where it's
not supported.
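
The restore side of the patch already encodes the analogous rule for
the xsave feature bits themselves; pulled out as a stand-alone sketch
(names borrowed from the patch, illustrative only):

    /* Every feature bit recorded in the image must also be
     * supported on the restoring host. */
    static int xsave_mask_compatible(uint64_t saved_mask, uint64_t host_mask)
    {
        return (saved_mask & host_mask) == saved_mask;
    }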

Cheers,

Tim.

-- 
Tim Deegan <Tim.Deegan@citrix.com>
Principal Software Engineer, XenServer Engineering
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)


* Re: Fwd: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-28  9:18       ` Tim Deegan
@ 2010-10-28 11:28         ` Haitao Shan
  2010-10-28 13:05           ` Keir Fraser
  0 siblings, 1 reply; 11+ messages in thread
From: Haitao Shan @ 2010-10-28 11:28 UTC (permalink / raw)
  To: Tim Deegan; +Cc: xen-devel, Keir Fraser

OK. I will update the patch according to the policy you described. Thanks!

Shan Haitao

2010/10/28 Tim Deegan <Tim.Deegan@citrix.com>:
> Hi,
>
> At 03:32 +0100 on 28 Oct (1288236759), Haitao Shan wrote:
>> >> diff -r 9bf6b4030d70 xen/arch/x86/hvm/hvm.c
>> >> --- a/xen/arch/x86/hvm/hvm.c  Wed Oct 27 21:55:45 2010 +0800
>> >> +++ b/xen/arch/x86/hvm/hvm.c  Wed Oct 27 22:17:24 2010 +0800
>> >> @@ -575,8 +575,13 @@ static int hvm_save_cpu_ctxt(struct doma
>> >>          vc = &v->arch.guest_context;
>> >>
>> >>          if ( v->fpu_initialised )
>> >> -            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
>> >> -        else
>> >> +            if ( cpu_has_xsave )
>> >> +                /* to restore guest img saved on xsave-incapable host */
>> >> +                memcpy(v->arch.xsave_area, ctxt.fpu_regs,
>> >> +                       sizeof(ctxt.fpu_regs));
>> >> +            else
>> >> +                memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>> >
>> > I think this hunk belongs in hvm_LOAD_cpu_ctxt()!
>> I once did the same as you said. But doing this in hvm_load_cpu_ctxt
>> depends on two things:
>> 1. hvm_load_cpu_ctxt cannot be executed before the xsave restore
>> routine is executed. Otherwise, xsave_area contains no useful data at
>> the time of copying.
>
> OK; then you should copy the other way in the xsave load routine as
> well.  Xsave load will always happen after the CPU load since save
> records are always written in increasing order of type.
>
> That way, if the save file has no xsave record, the new domain's xsave
> state is initialized from the fpu record, and if it does then the fpu
> state is initialized from the xsave record.  I think that's the
> behaviour you want.
>
> In any case this is *definitely* wrong where it is because the memcpy
> arguments are the wrong way round. :)
>
>> 2. It seems to break restore when an HVM guest (not touching eXtended
>> States at all) saved on an Xsave-capable host is later restored on an
>> Xsave-incapable host.
>
> That's not a safe thing to do anyway -- once you've told the guest (via
> CPUID) that XSAVE is available you can't migrate it to a host where it's
> not supported.
>
> Cheers,
>
> Tim.
>
> --
> Tim Deegan <Tim.Deegan@citrix.com>
> Principal Software Engineer, XenServer Engineering
> Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)
>


* Re: [Patch 4/4] Refining Xsave/Xrestore support
  2010-10-28 11:28         ` Haitao Shan
@ 2010-10-28 13:05           ` Keir Fraser
  0 siblings, 0 replies; 11+ messages in thread
From: Keir Fraser @ 2010-10-28 13:05 UTC (permalink / raw)
  To: Haitao Shan, Tim Deegan; +Cc: xen-devel, Keir Fraser

At this point please go apply all requested changes and resubmit the patch
series in its entirety. I've flushed old versions from my queue.

 -- Keir

On 28/10/2010 12:28, "Haitao Shan" <maillists.shan@gmail.com> wrote:

> OK. I will update the patch according to the policy you described. Thanks!
> 
> Shan Haitao
> 
> 2010/10/28 Tim Deegan <Tim.Deegan@citrix.com>:
>> Hi,
>> 
>> At 03:32 +0100 on 28 Oct (1288236759), Haitao Shan wrote:
>>>>> diff -r 9bf6b4030d70 xen/arch/x86/hvm/hvm.c
>>>>> --- a/xen/arch/x86/hvm/hvm.c  Wed Oct 27 21:55:45 2010 +0800
>>>>> +++ b/xen/arch/x86/hvm/hvm.c  Wed Oct 27 22:17:24 2010 +0800
>>>>> @@ -575,8 +575,13 @@ static int hvm_save_cpu_ctxt(struct doma
>>>>>          vc = &v->arch.guest_context;
>>>>> 
>>>>>          if ( v->fpu_initialised )
>>>>> -            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
>>>>> -        else
>>>>> +            if ( cpu_has_xsave )
>>>>> +                /* to restore guest img saved on xsave-incapable host */
>>>>> +                memcpy(v->arch.xsave_area, ctxt.fpu_regs,
>>>>> +                       sizeof(ctxt.fpu_regs));
>>>>> +            else
>>>>> +                memcpy(&vc->fpu_ctxt, ctxt.fpu_regs,
>>>>> sizeof(ctxt.fpu_regs));
>>>> 
>>>> I think this hunk belongs in hvm_LOAD_cpu_ctxt()!
>>> I once did the same as you said. But doing this in hvm_load_cpu_ctxt
>>> depends on two things:
>>> 1. hvm_load_cpu_ctxt cannot be executed before the xsave restore
>>> routine is executed. Otherwise, xsave_area contains no useful data at
>>> the time of copying.
>> 
>> OK; then you should copy the other way in the xsave load routine as
>> well.  Xsave load will always happen after the CPU load since save
>> records are always written in increasing order of type.
>> 
>> That way, if the save file has no xsave record, the new domain's xsave
>> state is initialized from the fpu record, and if it does then the fpu
>> state is initialized from the xsave record.  I think that's the
>> behaviour you want.
>> 
>> In any case this is *definitely* wrong where it is because the memcpy
>> arguments are the wrong way round. :)
>> 
>>> 2. It seems to break restore when an HVM guest (not touching eXtended
>>> States at all) saved on an Xsave-capable host is later restored on an
>>> Xsave-incapable host.
>> 
>> That's not a safe thing to do anyway -- once you've told the guest (via
>> CPUID) that XSAVE is available you can't migrate it to a host where it's
>> not supported.
>> 
>> Cheers,
>> 
>> Tim.
>> 
>> --
>> Tim Deegan <Tim.Deegan@citrix.com>
>> Principal Software Engineer, XenServer Engineering
>> Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)
>> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel



Thread overview: 11+ messages
2010-10-27  7:05 [Patch 4/4] Refining Xsave/Xrestore support Haitao Shan
2010-10-27 10:25 ` Tim Deegan
     [not found]   ` <AANLkTi=1fW3rQL+8SRAzYv3D6Lqo2PGC7uYzd5VkX8hw@mail.gmail.com>
2010-10-28  2:32     ` Fwd: " Haitao Shan
2010-10-28  9:18       ` Tim Deegan
2010-10-28 11:28         ` Haitao Shan
2010-10-28 13:05           ` Keir Fraser
2010-10-28  4:57   ` Haitao Shan
2010-10-27 10:39 ` Jan Beulich
2010-10-28  2:52   ` Haitao Shan
2010-10-28  7:21     ` Jan Beulich
2010-10-28  7:34       ` Haitao Shan
