All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] x86/domctl: Don't pause the whole domain if only getting vcpu state
@ 2017-09-12 13:53 Alexandru Isaila
  2017-09-18 15:35 ` Jan Beulich
  0 siblings, 1 reply; 6+ messages in thread
From: Alexandru Isaila @ 2017-09-12 13:53 UTC (permalink / raw)
  To: xen-devel; +Cc: Alexandru Isaila, andrew.cooper3, jbeulich

This patch adds the hvm_save_one_cpu_ctxt() function, called for the
XEN_DOMCTL_gethvmcontext_partial domctl.
It optimizes by only pausing the vcpu, and no longer the whole domain.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
---
 xen/arch/x86/domctl.c         |  20 +++++
 xen/arch/x86/hvm/hvm.c        | 194 ++++++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/hvm.h |   2 +
 3 files changed, 122 insertions(+), 94 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 127c84e..6c55622 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -625,6 +625,26 @@ long arch_do_domctl(
              !is_hvm_domain(d) )
             break;
 
+        if ( domctl->u.hvmcontext_partial.type == HVM_SAVE_CODE(CPU) &&
+             domctl->u.hvmcontext_partial.instance < d->max_vcpus )
+        {
+             struct vcpu *v = d->vcpu[domctl->u.hvmcontext_partial.instance];
+             struct hvm_hw_cpu ctx;
+
+             vcpu_pause(v);
+
+             hvm_save_one_cpu_ctxt(v, &ctx);
+
+             vcpu_unpause(v);
+
+             if ( copy_to_guest(domctl->u.hvmcontext_partial.buffer,
+                (void *)&ctx, sizeof(ctx)) )
+                ret = -EFAULT;
+             else
+                ret = 0;
+             break;
+        }
+
         domain_pause(d);
         ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
                            domctl->u.hvmcontext_partial.instance,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6cb903d..23f624b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -768,11 +768,109 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
+void hvm_save_one_cpu_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+{
+    struct segment_register seg;
+
+    /* Architecture-specific vmcs/vmcb bits */
+    hvm_funcs.save_cpu_ctxt(v, ctxt);
+
+    ctxt->tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm_domain.sync_tsc);
+
+    ctxt->msr_tsc_aux = hvm_msr_tsc_aux(v);
+
+    hvm_get_segment_register(v, x86_seg_idtr, &seg);
+    ctxt->idtr_limit = seg.limit;
+    ctxt->idtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+    ctxt->gdtr_limit = seg.limit;
+    ctxt->gdtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_cs, &seg);
+    ctxt->cs_sel = seg.sel;
+    ctxt->cs_limit = seg.limit;
+    ctxt->cs_base = seg.base;
+    ctxt->cs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ds, &seg);
+    ctxt->ds_sel = seg.sel;
+    ctxt->ds_limit = seg.limit;
+    ctxt->ds_base = seg.base;
+    ctxt->ds_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_es, &seg);
+    ctxt->es_sel = seg.sel;
+    ctxt->es_limit = seg.limit;
+    ctxt->es_base = seg.base;
+    ctxt->es_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ss, &seg);
+    ctxt->ss_sel = seg.sel;
+    ctxt->ss_limit = seg.limit;
+    ctxt->ss_base = seg.base;
+    ctxt->ss_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_fs, &seg);
+    ctxt->fs_sel = seg.sel;
+    ctxt->fs_limit = seg.limit;
+    ctxt->fs_base = seg.base;
+    ctxt->fs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_gs, &seg);
+    ctxt->gs_sel = seg.sel;
+    ctxt->gs_limit = seg.limit;
+    ctxt->gs_base = seg.base;
+    ctxt->gs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_tr, &seg);
+    ctxt->tr_sel = seg.sel;
+    ctxt->tr_limit = seg.limit;
+    ctxt->tr_base = seg.base;
+    ctxt->tr_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+    ctxt->ldtr_sel = seg.sel;
+    ctxt->ldtr_limit = seg.limit;
+    ctxt->ldtr_base = seg.base;
+    ctxt->ldtr_arbytes = seg.attr;
+
+    if ( v->fpu_initialised )
+    {
+        memcpy(ctxt->fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt->fpu_regs));
+        ctxt->flags = XEN_X86_FPU_INITIALISED;
+    }
+
+    ctxt->rax = v->arch.user_regs.eax;
+    ctxt->rbx = v->arch.user_regs.ebx;
+    ctxt->rcx = v->arch.user_regs.ecx;
+    ctxt->rdx = v->arch.user_regs.edx;
+    ctxt->rbp = v->arch.user_regs.ebp;
+    ctxt->rsi = v->arch.user_regs.esi;
+    ctxt->rdi = v->arch.user_regs.edi;
+    ctxt->rsp = v->arch.user_regs.esp;
+    ctxt->rip = v->arch.user_regs.eip;
+    ctxt->rflags = v->arch.user_regs.eflags;
+    ctxt->r8  = v->arch.user_regs.r8;
+    ctxt->r9  = v->arch.user_regs.r9;
+    ctxt->r10 = v->arch.user_regs.r10;
+    ctxt->r11 = v->arch.user_regs.r11;
+    ctxt->r12 = v->arch.user_regs.r12;
+    ctxt->r13 = v->arch.user_regs.r13;
+    ctxt->r14 = v->arch.user_regs.r14;
+    ctxt->r15 = v->arch.user_regs.r15;
+    ctxt->dr0 = v->arch.debugreg[0];
+    ctxt->dr1 = v->arch.debugreg[1];
+    ctxt->dr2 = v->arch.debugreg[2];
+    ctxt->dr3 = v->arch.debugreg[3];
+    ctxt->dr6 = v->arch.debugreg[6];
+    ctxt->dr7 = v->arch.debugreg[7];
+}
+
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
-    struct segment_register seg;
 
     for_each_vcpu ( d, v )
     {
@@ -783,99 +881,7 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 
         memset(&ctxt, 0, sizeof(ctxt));
 
-        /* Architecture-specific vmcs/vmcb bits */
-        hvm_funcs.save_cpu_ctxt(v, &ctxt);
-
-        ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
-
-        ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
-
-        hvm_get_segment_register(v, x86_seg_idtr, &seg);
-        ctxt.idtr_limit = seg.limit;
-        ctxt.idtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_gdtr, &seg);
-        ctxt.gdtr_limit = seg.limit;
-        ctxt.gdtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_cs, &seg);
-        ctxt.cs_sel = seg.sel;
-        ctxt.cs_limit = seg.limit;
-        ctxt.cs_base = seg.base;
-        ctxt.cs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ds, &seg);
-        ctxt.ds_sel = seg.sel;
-        ctxt.ds_limit = seg.limit;
-        ctxt.ds_base = seg.base;
-        ctxt.ds_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_es, &seg);
-        ctxt.es_sel = seg.sel;
-        ctxt.es_limit = seg.limit;
-        ctxt.es_base = seg.base;
-        ctxt.es_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ss, &seg);
-        ctxt.ss_sel = seg.sel;
-        ctxt.ss_limit = seg.limit;
-        ctxt.ss_base = seg.base;
-        ctxt.ss_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_fs, &seg);
-        ctxt.fs_sel = seg.sel;
-        ctxt.fs_limit = seg.limit;
-        ctxt.fs_base = seg.base;
-        ctxt.fs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_gs, &seg);
-        ctxt.gs_sel = seg.sel;
-        ctxt.gs_limit = seg.limit;
-        ctxt.gs_base = seg.base;
-        ctxt.gs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_tr, &seg);
-        ctxt.tr_sel = seg.sel;
-        ctxt.tr_limit = seg.limit;
-        ctxt.tr_base = seg.base;
-        ctxt.tr_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ldtr, &seg);
-        ctxt.ldtr_sel = seg.sel;
-        ctxt.ldtr_limit = seg.limit;
-        ctxt.ldtr_base = seg.base;
-        ctxt.ldtr_arbytes = seg.attr;
-
-        if ( v->fpu_initialised )
-        {
-            memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
-            ctxt.flags = XEN_X86_FPU_INITIALISED;
-        }
-
-        ctxt.rax = v->arch.user_regs.rax;
-        ctxt.rbx = v->arch.user_regs.rbx;
-        ctxt.rcx = v->arch.user_regs.rcx;
-        ctxt.rdx = v->arch.user_regs.rdx;
-        ctxt.rbp = v->arch.user_regs.rbp;
-        ctxt.rsi = v->arch.user_regs.rsi;
-        ctxt.rdi = v->arch.user_regs.rdi;
-        ctxt.rsp = v->arch.user_regs.rsp;
-        ctxt.rip = v->arch.user_regs.rip;
-        ctxt.rflags = v->arch.user_regs.rflags;
-        ctxt.r8  = v->arch.user_regs.r8;
-        ctxt.r9  = v->arch.user_regs.r9;
-        ctxt.r10 = v->arch.user_regs.r10;
-        ctxt.r11 = v->arch.user_regs.r11;
-        ctxt.r12 = v->arch.user_regs.r12;
-        ctxt.r13 = v->arch.user_regs.r13;
-        ctxt.r14 = v->arch.user_regs.r14;
-        ctxt.r15 = v->arch.user_regs.r15;
-        ctxt.dr0 = v->arch.debugreg[0];
-        ctxt.dr1 = v->arch.debugreg[1];
-        ctxt.dr2 = v->arch.debugreg[2];
-        ctxt.dr3 = v->arch.debugreg[3];
-        ctxt.dr6 = v->arch.debugreg[6];
-        ctxt.dr7 = v->arch.debugreg[7];
+        hvm_save_one_cpu_ctxt(v, &ctxt);
 
         if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
             return 1; 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b687e03..c4b7b3d 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -609,6 +609,8 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
     return false;
 }
 
+void hvm_save_one_cpu_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt);
+
 /* Check CR4/EFER values */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                            signed int cr0_pg);
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH] x86/domctl: Don't pause the whole domain if only getting vcpu state
  2017-09-12 13:53 [PATCH] x86/domctl: Don't pause the whole domain if only getting vcpu state Alexandru Isaila
@ 2017-09-18 15:35 ` Jan Beulich
  2017-09-18 17:03   ` Razvan Cojocaru
  0 siblings, 1 reply; 6+ messages in thread
From: Jan Beulich @ 2017-09-18 15:35 UTC (permalink / raw)
  To: Alexandru Isaila; +Cc: andrew.cooper3, xen-devel

>>> On 12.09.17 at 15:53, <aisaila@bitdefender.com> wrote:
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -625,6 +625,26 @@ long arch_do_domctl(
>               !is_hvm_domain(d) )
>              break;
>  
> +        if ( domctl->u.hvmcontext_partial.type == HVM_SAVE_CODE(CPU) &&
> +             domctl->u.hvmcontext_partial.instance < d->max_vcpus )

I have to admit that I'm not in favor of such special casing, even
less so without any code comment saying why this is so special.
What if someone else wanted some other piece of vCPU state
without pausing the entire domain? Wouldn't it be possible to
generalize this to cover all such state elements?

> +        {
> +             struct vcpu *v = d->vcpu[domctl->u.hvmcontext_partial.instance];
> +             struct hvm_hw_cpu ctx;
> +
> +             vcpu_pause(v);
> +
> +             hvm_save_one_cpu_ctxt(v, &ctx);
> +
> +             vcpu_unpause(v);
> +
> +             if ( copy_to_guest(domctl->u.hvmcontext_partial.buffer,
> +                (void *)&ctx, sizeof(ctx)) )

Indentation.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] x86/domctl: Don't pause the whole domain if only getting vcpu state
  2017-09-18 15:35 ` Jan Beulich
@ 2017-09-18 17:03   ` Razvan Cojocaru
  2017-09-19  6:11     ` Jan Beulich
  0 siblings, 1 reply; 6+ messages in thread
From: Razvan Cojocaru @ 2017-09-18 17:03 UTC (permalink / raw)
  To: xen-devel

On 09/18/2017 06:35 PM, Jan Beulich wrote:
>>>> On 12.09.17 at 15:53, <aisaila@bitdefender.com> wrote:
>> --- a/xen/arch/x86/domctl.c
>> +++ b/xen/arch/x86/domctl.c
>> @@ -625,6 +625,26 @@ long arch_do_domctl(
>>               !is_hvm_domain(d) )
>>              break;
>>  
>> +        if ( domctl->u.hvmcontext_partial.type == HVM_SAVE_CODE(CPU) &&
>> +             domctl->u.hvmcontext_partial.instance < d->max_vcpus )
> 
> I have to admit that I'm not in favor of such special casing, even
> less so without any code comment saying why this is so special.
> What if someone else wanted some other piece of vCPU state
> without pausing the entire domain? Wouldn't it be possible to
> generalize this to cover all such state elements?

There's no reason why all the other cases where this would be possible
shouldn't be optimized. What has made this one stand out for us is that
we're using it a lot with introspection, and the optimization counts.

But judging by the code reorganization (the addition of
hvm_save_one_cpu_ctxt()), the changes would need to be done on a
one-by-one case anyway (different queries may require different ways of
changing the code).



Thanks,
Razvan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] x86/domctl: Don't pause the whole domain if only getting vcpu state
  2017-09-18 17:03   ` Razvan Cojocaru
@ 2017-09-19  6:11     ` Jan Beulich
  2017-09-19 15:28       ` Alexandru Stefan ISAILA
  0 siblings, 1 reply; 6+ messages in thread
From: Jan Beulich @ 2017-09-19  6:11 UTC (permalink / raw)
  To: rcojocaru; +Cc: xen-devel

>>> Razvan Cojocaru <rcojocaru@bitdefender.com> 09/18/17 7:05 PM >>>
>On 09/18/2017 06:35 PM, Jan Beulich wrote:
>>>>> On 12.09.17 at 15:53, <aisaila@bitdefender.com> wrote:
>>> --- a/xen/arch/x86/domctl.c
>>> +++ b/xen/arch/x86/domctl.c
>>> @@ -625,6 +625,26 @@ long arch_do_domctl(
>>>               !is_hvm_domain(d) )
>>>              break;
>>>  
>>> +        if ( domctl->u.hvmcontext_partial.type == HVM_SAVE_CODE(CPU) &&
>>> +             domctl->u.hvmcontext_partial.instance < d->max_vcpus )
>> 
>> I have to admit that I'm not in favor of such special casing, even
>> less so without any code comment saying why this is so special.
>> What if someone else wanted some other piece of vCPU state
>> without pausing the entire domain? Wouldn't it be possible to
>> generalize this to cover all such state elements?
>
>There's no reason why all the other cases where this would be possible
>shouldn't be optimized. What has made this one stand out for us is that
>we're using it a lot with introspection, and the optimization counts.
>
>But judging by the code reorganization (the addition of
>hvm_save_one_cpu_ctxt()), the changes would need to be done on a
>one-by-one case anyway (different queries may require different ways of
>changing the code).

But this function addition is precisely what I'd like to avoid in favor of
an extension to the existing mechanism using the registered function
pointers.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] x86/domctl: Don't pause the whole domain if only getting vcpu state
  2017-09-19  6:11     ` Jan Beulich
@ 2017-09-19 15:28       ` Alexandru Stefan ISAILA
  2017-09-19 15:44         ` Jan Beulich
  0 siblings, 1 reply; 6+ messages in thread
From: Alexandru Stefan ISAILA @ 2017-09-19 15:28 UTC (permalink / raw)
  To: rcojocaru, jbeulich; +Cc: xen-devel

On Ma, 2017-09-19 at 00:11 -0600, Jan Beulich wrote:
> >
> > >
> > > >
> > > > Razvan Cojocaru <rcojocaru@bitdefender.com> 09/18/17 7:05 PM
> > > > >>>
> > On 09/18/2017 06:35 PM, Jan Beulich wrote:
> > >
> > > >
> > > > >
> > > > > >
> > > > > > On 12.09.17 at 15:53, <aisaila@bitdefender.com> wrote:
> > > > --- a/xen/arch/x86/domctl.c
> > > > +++ b/xen/arch/x86/domctl.c
> > > > @@ -625,6 +625,26 @@ long arch_do_domctl(
> > > >               !is_hvm_domain(d) )
> > > >              break;
> > > >
> > > > +        if ( domctl->u.hvmcontext_partial.type ==
> > > > HVM_SAVE_CODE(CPU) &&
> > > > +             domctl->u.hvmcontext_partial.instance < d-
> > > > >max_vcpus )
> > > I have to admit that I'm not in favor of such special casing,
> > > even
> > > less so without any code comment saying why this is so special.
> > > What if someone else wanted some other piece of vCPU state
> > > without pausing the entire domain? Wouldn't it be possible to
> > > generalize this to cover all such state elements?
> > There's no reason why all the other cases where this would be
> > possible
> > shouldn't be optimized. What has made this one stand out for us is
> > that
> > we're using it a lot with introspection, and the optimization
> > counts.
> >
> > But judging by the code reorganization (the addition of
> > hvm_save_one_cpu_ctxt()), the changes would need to be done on a
> > one-by-one case anyway (different queries may require different
> > ways of
> > changing the code).
> But this function addition is precisely what I'd like to avoid in
> favor of
> an extension to the existing mechanism using the registered function
> pointers.
>
What would be a suitable extension of the current callback system?

Regards,
Alex


________________________
This email was scanned by Bitdefender
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] x86/domctl: Don't pause the whole domain if only getting vcpu state
  2017-09-19 15:28       ` Alexandru Stefan ISAILA
@ 2017-09-19 15:44         ` Jan Beulich
  0 siblings, 0 replies; 6+ messages in thread
From: Jan Beulich @ 2017-09-19 15:44 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA; +Cc: rcojocaru, xen-devel

>>> On 19.09.17 at 17:28, <aisaila@bitdefender.com> wrote:
> On Ma, 2017-09-19 at 00:11 -0600, Jan Beulich wrote:
>> > > > Razvan Cojocaru <rcojocaru@bitdefender.com> 09/18/17 7:05 PM
>> > On 09/18/2017 06:35 PM, Jan Beulich wrote:
>> > > > > > On 12.09.17 at 15:53, <aisaila@bitdefender.com> wrote:
>> > > > --- a/xen/arch/x86/domctl.c
>> > > > +++ b/xen/arch/x86/domctl.c
>> > > > @@ -625,6 +625,26 @@ long arch_do_domctl(
>> > > >               !is_hvm_domain(d) )
>> > > >              break;
>> > > >
>> > > > +        if ( domctl->u.hvmcontext_partial.type ==
>> > > > HVM_SAVE_CODE(CPU) &&
>> > > > +             domctl->u.hvmcontext_partial.instance < d-
>> > > > >max_vcpus )
>> > > I have to admit that I'm not in favor of such special casing,
>> > > even
>> > > less so without any code comment saying why this is so special.
>> > > What if someone else wanted some other piece of vCPU state
>> > > without pausing the entire domain? Wouldn't it be possible to
>> > > generalize this to cover all such state elements?
>> > There's no reason why all the other cases where this would be
>> > possible
>> > shouldn't be optimized. What has made this one stand out for us is
>> > that
>> > we're using it a lot with introspection, and the optimization
>> > counts.
>> >
>> > But judging by the code reorganization (the addition of
>> > hvm_save_one_cpu_ctxt()), the changes would need to be done on a
>> > one-by-one case anyway (different queries may require different
>> > ways of
>> > changing the code).
>> But this function addition is precisely what I'd like to avoid in
>> favor of
>> an extension to the existing mechanism using the registered function
>> pointers.
>>
> What will be a suitable extend of the current call back system?

I'm not sure what you expect as an answer here. Something
following the current model, but skipping everything that's
not per-vCPU, and for everything being per-vCPU handling just
the single vCPU of interest.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2017-09-19 15:44 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-12 13:53 [PATCH] x86/domctl: Don't pause the whole domain if only getting vcpu state Alexandru Isaila
2017-09-18 15:35 ` Jan Beulich
2017-09-18 17:03   ` Razvan Cojocaru
2017-09-19  6:11     ` Jan Beulich
2017-09-19 15:28       ` Alexandru Stefan ISAILA
2017-09-19 15:44         ` Jan Beulich

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.