* [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
@ 2018-10-24  9:19 Alexandru Stefan ISAILA
  2018-10-24  9:23 ` Razvan Cojocaru
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Alexandru Stefan ISAILA @ 2018-10-24  9:19 UTC (permalink / raw)
  To: xen-devel
  Cc: kevin.tian, tamas, wei.liu2, jbeulich, rcojocaru, jun.nakajima,
	andrew.cooper3, paul.durrant, suravee.suthikulpanit,
	Alexandru Stefan ISAILA, boris.ostrovsky, brian.woods

The may_defer var was left with the older bool_t type. This patch
changes the type to bool.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
---
 xen/arch/x86/hvm/emulate.c        |  8 ++++----
 xen/arch/x86/hvm/hvm.c            | 14 +++++++-------
 xen/arch/x86/hvm/svm/nestedsvm.c  | 14 +++++++-------
 xen/arch/x86/hvm/svm/svm.c        |  2 +-
 xen/arch/x86/hvm/vm_event.c       |  8 ++++----
 xen/arch/x86/hvm/vmx/vmx.c        |  4 ++--
 xen/arch/x86/hvm/vmx/vvmx.c       | 16 ++++++++--------
 xen/include/asm-x86/hvm/support.h |  6 +++---
 8 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index cd1d9a7c57..9e7deaa6cd 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2024,7 +2024,7 @@ static int hvmemul_write_cr(
     switch ( reg )
     {
     case 0:
-        rc = hvm_set_cr0(val, 1);
+        rc = hvm_set_cr0(val, true);
         break;
 
     case 2:
@@ -2033,11 +2033,11 @@ static int hvmemul_write_cr(
         break;
 
     case 3:
-        rc = hvm_set_cr3(val, 1);
+        rc = hvm_set_cr3(val, true);
         break;
 
     case 4:
-        rc = hvm_set_cr4(val, 1);
+        rc = hvm_set_cr4(val, true);
         break;
 
     default:
@@ -2092,7 +2092,7 @@ static int hvmemul_write_msr(
     uint64_t val,
     struct x86_emulate_ctxt *ctxt)
 {
-    int rc = hvm_msr_write_intercept(reg, val, 1);
+    int rc = hvm_msr_write_intercept(reg, val, true);
 
     if ( rc == X86EMUL_EXCEPTION )
         x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index af13de3745..a140e60c9c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2038,15 +2038,15 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
     switch ( cr )
     {
     case 0:
-        rc = hvm_set_cr0(val, 1);
+        rc = hvm_set_cr0(val, true);
         break;
 
     case 3:
-        rc = hvm_set_cr3(val, 1);
+        rc = hvm_set_cr3(val, true);
         break;
 
     case 4:
-        rc = hvm_set_cr4(val, 1);
+        rc = hvm_set_cr4(val, true);
         break;
 
     case 8:
@@ -2142,7 +2142,7 @@ static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
     hvm_update_guest_cr(v, cr);
 }
 
-int hvm_set_cr0(unsigned long value, bool_t may_defer)
+int hvm_set_cr0(unsigned long value, bool may_defer)
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
@@ -2260,7 +2260,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
     return X86EMUL_OKAY;
 }
 
-int hvm_set_cr3(unsigned long value, bool_t may_defer)
+int hvm_set_cr3(unsigned long value, bool may_defer)
 {
     struct vcpu *v = current;
     struct page_info *page;
@@ -2314,7 +2314,7 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
     return X86EMUL_UNHANDLEABLE;
 }
 
-int hvm_set_cr4(unsigned long value, bool_t may_defer)
+int hvm_set_cr4(unsigned long value, bool may_defer)
 {
     struct vcpu *v = current;
     unsigned long old_cr;
@@ -2981,7 +2981,7 @@ void hvm_task_switch(
     if ( task_switch_load_seg(x86_seg_ldtr, tss.ldt, new_cpl, 0) )
         goto out;
 
-    rc = hvm_set_cr3(tss.cr3, 1);
+    rc = hvm_set_cr3(tss.cr3, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     if ( rc != X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 78a1016e94..088b3fd562 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -285,7 +285,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
 
     /* CR4 */
     v->arch.hvm.guest_cr[4] = n1vmcb->_cr4;
-    rc = hvm_set_cr4(n1vmcb->_cr4, 1);
+    rc = hvm_set_cr4(n1vmcb->_cr4, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     if (rc != X86EMUL_OKAY)
@@ -296,7 +296,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
         svm->ns_cr0, v->arch.hvm.guest_cr[0]);
     v->arch.hvm.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
     n1vmcb->rflags &= ~X86_EFLAGS_VM;
-    rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
+    rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     if (rc != X86EMUL_OKAY)
@@ -324,7 +324,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
         v->arch.guest_table = pagetable_null();
         /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
     }
-    rc = hvm_set_cr3(n1vmcb->_cr3, 1);
+    rc = hvm_set_cr3(n1vmcb->_cr3, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     if (rc != X86EMUL_OKAY)
@@ -556,7 +556,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
 
     /* CR4 */
     v->arch.hvm.guest_cr[4] = ns_vmcb->_cr4;
-    rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
+    rc = hvm_set_cr4(ns_vmcb->_cr4, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     if (rc != X86EMUL_OKAY)
@@ -566,7 +566,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
     svm->ns_cr0 = v->arch.hvm.guest_cr[0];
     cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
     v->arch.hvm.guest_cr[0] = ns_vmcb->_cr0;
-    rc = hvm_set_cr0(cr0, 1);
+    rc = hvm_set_cr0(cr0, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     if (rc != X86EMUL_OKAY)
@@ -584,7 +584,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
         nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
 
         /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
-        rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
+        rc = hvm_set_cr3(ns_vmcb->_cr3, true);
         if ( rc == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         if (rc != X86EMUL_OKAY)
@@ -598,7 +598,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
          * we assume it intercepts page faults.
          */
         /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
-        rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
+        rc = hvm_set_cr3(ns_vmcb->_cr3, true);
         if ( rc == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         if (rc != X86EMUL_OKAY)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index dd0aca4f53..d553614c14 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2332,7 +2332,7 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
             msr_split(regs, msr_content);
     }
     else
-        rc = hvm_msr_write_intercept(regs->ecx, msr_fold(regs), 1);
+        rc = hvm_msr_write_intercept(regs->ecx, msr_fold(regs), true);
 
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 28d08a6630..0df8ab40e6 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -94,7 +94,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
 
     if ( unlikely(w->do_write.cr0) )
     {
-        if ( hvm_set_cr0(w->cr0, 0) == X86EMUL_EXCEPTION )
+        if ( hvm_set_cr0(w->cr0, false) == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
         w->do_write.cr0 = 0;
@@ -102,7 +102,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
 
     if ( unlikely(w->do_write.cr4) )
     {
-        if ( hvm_set_cr4(w->cr4, 0) == X86EMUL_EXCEPTION )
+        if ( hvm_set_cr4(w->cr4, false) == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
         w->do_write.cr4 = 0;
@@ -110,7 +110,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
 
     if ( unlikely(w->do_write.cr3) )
     {
-        if ( hvm_set_cr3(w->cr3, 0) == X86EMUL_EXCEPTION )
+        if ( hvm_set_cr3(w->cr3, false) == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
         w->do_write.cr3 = 0;
@@ -118,7 +118,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
 
     if ( unlikely(w->do_write.msr) )
     {
-        if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
+        if ( hvm_msr_write_intercept(w->msr, w->value, false) ==
              X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d16129fb59..d574f4a2d1 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2654,7 +2654,7 @@ static int vmx_cr_access(cr_access_qual_t qual)
                  (X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
         HVMTRACE_LONG_1D(LMSW, value);
 
-        if ( (rc = hvm_set_cr0(value, 1)) == X86EMUL_EXCEPTION )
+        if ( (rc = hvm_set_cr0(value, true)) == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
         return rc;
@@ -3990,7 +3990,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     }
 
     case EXIT_REASON_MSR_WRITE:
-        switch ( hvm_msr_write_intercept(regs->ecx, msr_fold(regs), 1) )
+        switch ( hvm_msr_write_intercept(regs->ecx, msr_fold(regs), true) )
         {
         case X86EMUL_OKAY:
             update_guest_eip(); /* Safe: WRMSR */
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 0e45db83e5..8d2fb63724 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1059,15 +1059,15 @@ static void load_shadow_guest_state(struct vcpu *v)
     nvcpu->guest_cr[0] = get_vvmcs(v, CR0_READ_SHADOW);
     nvcpu->guest_cr[4] = get_vvmcs(v, CR4_READ_SHADOW);
 
-    rc = hvm_set_cr0(get_vvmcs(v, GUEST_CR0), 1);
+    rc = hvm_set_cr0(get_vvmcs(v, GUEST_CR0), true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
-    rc = hvm_set_cr4(get_vvmcs(v, GUEST_CR4), 1);
+    rc = hvm_set_cr4(get_vvmcs(v, GUEST_CR4), true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
-    rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), 1);
+    rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
@@ -1077,7 +1077,7 @@ static void load_shadow_guest_state(struct vcpu *v)
     if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
     {
         rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                     get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+                                     get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), false);
         if ( rc == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
     }
@@ -1265,15 +1265,15 @@ static void load_vvmcs_host_state(struct vcpu *v)
         __vmwrite(vmcs_h2g_field[i].guest_field, r);
     }
 
-    rc = hvm_set_cr0(get_vvmcs(v, HOST_CR0), 1);
+    rc = hvm_set_cr0(get_vvmcs(v, HOST_CR0), true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
-    rc = hvm_set_cr4(get_vvmcs(v, HOST_CR4), 1);
+    rc = hvm_set_cr4(get_vvmcs(v, HOST_CR4), true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
-    rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), 1);
+    rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
@@ -1283,7 +1283,7 @@ static void load_vvmcs_host_state(struct vcpu *v)
     if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
     {
         rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                     get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+                                     get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), true);
         if ( rc == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
     }
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 7222939a6a..e989aa7349 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -134,9 +134,9 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
  * returned.
  */
 int hvm_set_efer(uint64_t value);
-int hvm_set_cr0(unsigned long value, bool_t may_defer);
-int hvm_set_cr3(unsigned long value, bool_t may_defer);
-int hvm_set_cr4(unsigned long value, bool_t may_defer);
+int hvm_set_cr0(unsigned long value, bool may_defer);
+int hvm_set_cr3(unsigned long value, bool may_defer);
+int hvm_set_cr4(unsigned long value, bool may_defer);
 int hvm_descriptor_access_intercept(uint64_t exit_info,
                                     uint64_t vmx_exit_qualification,
                                     unsigned int descriptor, bool is_write);
-- 
2.17.1


* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-10-24  9:19 [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers Alexandru Stefan ISAILA
@ 2018-10-24  9:23 ` Razvan Cojocaru
  2018-10-24  9:47 ` Wei Liu
  2018-10-30  6:19 ` Tian, Kevin
  2 siblings, 0 replies; 11+ messages in thread
From: Razvan Cojocaru @ 2018-10-24  9:23 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA, xen-devel
  Cc: kevin.tian, tamas, wei.liu2, jbeulich, jun.nakajima,
	andrew.cooper3, paul.durrant, suravee.suthikulpanit,
	boris.ostrovsky, brian.woods

On 10/24/18 12:19 PM, Alexandru Stefan ISAILA wrote:
> The may_defer var was left with the older bool_t type. This patch
> changes the type to bool.
> 
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>

* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-10-24  9:19 [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers Alexandru Stefan ISAILA
  2018-10-24  9:23 ` Razvan Cojocaru
@ 2018-10-24  9:47 ` Wei Liu
  2018-10-26 15:48   ` Jan Beulich
  2018-10-30  6:19 ` Tian, Kevin
  2 siblings, 1 reply; 11+ messages in thread
From: Wei Liu @ 2018-10-24  9:47 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA
  Cc: kevin.tian, tamas, wei.liu2, suravee.suthikulpanit, rcojocaru,
	jun.nakajima, andrew.cooper3, paul.durrant, jbeulich, xen-devel,
	boris.ostrovsky, brian.woods

On Wed, Oct 24, 2018 at 09:19:06AM +0000, Alexandru Stefan ISAILA wrote:
> The may_defer var was left with the older bool_t type. This patch
> changes the type to bool.
> 
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>

* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-10-24  9:47 ` Wei Liu
@ 2018-10-26 15:48   ` Jan Beulich
  2018-10-26 15:52     ` Woods, Brian
  0 siblings, 1 reply; 11+ messages in thread
From: Jan Beulich @ 2018-10-26 15:48 UTC (permalink / raw)
  To: aisaila
  Cc: Kevin Tian, Tamas K Lengyel, Wei Liu, Jun Nakajima,
	Razvan Cojocaru, Andrew Cooper, Paul Durrant,
	Suravee Suthikulpanit, xen-devel, Boris Ostrovsky, Brian Woods

>>> On 24.10.18 at 11:47, <wei.liu2@citrix.com> wrote:
> On Wed, Oct 24, 2018 at 09:19:06AM +0000, Alexandru Stefan ISAILA wrote:
>> The may_defer var was left with the older bool_t type. This patch
>> changes the type to bool.
>> 
>> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> 
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>



* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-10-26 15:48   ` Jan Beulich
@ 2018-10-26 15:52     ` Woods, Brian
  0 siblings, 0 replies; 11+ messages in thread
From: Woods, Brian @ 2018-10-26 15:52 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, Tamas K Lengyel, Wei Liu, Jun Nakajima,
	Razvan Cojocaru, Andrew Cooper, Paul Durrant, Suthikulpanit,
	Suravee, aisaila, xen-devel, Boris Ostrovsky, Woods, Brian

On Fri, Oct 26, 2018 at 09:48:39AM -0600, Jan Beulich wrote:
> >>> On 24.10.18 at 11:47, <wei.liu2@citrix.com> wrote:
> > On Wed, Oct 24, 2018 at 09:19:06AM +0000, Alexandru Stefan ISAILA wrote:
> >> The may_defer var was left with the older bool_t type. This patch
> >> changes the type to bool.
> >> 
> >> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> > 
> > Reviewed-by: Wei Liu <wei.liu2@citrix.com>
> 
> Acked-by: Jan Beulich <jbeulich@suse.com>
> 
Acked-by: Brian Woods <brian.woods@amd.com>

-- 
Brian Woods

* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-10-24  9:19 [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers Alexandru Stefan ISAILA
  2018-10-24  9:23 ` Razvan Cojocaru
  2018-10-24  9:47 ` Wei Liu
@ 2018-10-30  6:19 ` Tian, Kevin
  2018-11-01 10:31   ` Razvan Cojocaru
  2 siblings, 1 reply; 11+ messages in thread
From: Tian, Kevin @ 2018-10-30  6:19 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA, xen-devel
  Cc: tamas, wei.liu2, jbeulich, rcojocaru, Nakajima, Jun,
	andrew.cooper3, paul.durrant, suravee.suthikulpanit,
	boris.ostrovsky, brian.woods

> From: Alexandru Stefan ISAILA [mailto:aisaila@bitdefender.com]
> Sent: Wednesday, October 24, 2018 5:19 PM
> 
> The may_defer var was left with the older bool_t type. This patch
> changes the type to bool.
> 
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-10-30  6:19 ` Tian, Kevin
@ 2018-11-01 10:31   ` Razvan Cojocaru
  2018-11-01 13:58     ` Jan Beulich
  0 siblings, 1 reply; 11+ messages in thread
From: Razvan Cojocaru @ 2018-11-01 10:31 UTC (permalink / raw)
  To: Tian, Kevin, Alexandru Stefan ISAILA, xen-devel
  Cc: tamas, wei.liu2, jbeulich, Nakajima, Jun, andrew.cooper3,
	paul.durrant, suravee.suthikulpanit, boris.ostrovsky,
	brian.woods

On 10/30/18 8:19 AM, Tian, Kevin wrote:
>> From: Alexandru Stefan ISAILA [mailto:aisaila@bitdefender.com]
>> Sent: Wednesday, October 24, 2018 5:19 PM
>>
>> The may_defer var was left with the older bool_t type. This patch
>> changes the type to bool.
>>
>> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> 
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>

I think this trivial patch has all the acks it needs to go in?


Thanks,
Razvan

* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-11-01 10:31   ` Razvan Cojocaru
@ 2018-11-01 13:58     ` Jan Beulich
  2018-11-01 14:03       ` Paul Durrant
  2018-11-01 14:04       ` Razvan Cojocaru
  0 siblings, 2 replies; 11+ messages in thread
From: Jan Beulich @ 2018-11-01 13:58 UTC (permalink / raw)
  To: rcojocaru
  Cc: kevin.tian, tamas, wei.liu2, jun.nakajima, andrew.cooper3,
	paul.durrant, suravee.suthikulpanit, aisaila, xen-devel,
	boris.ostrovsky, brian.woods

>>> Razvan Cojocaru <rcojocaru@bitdefender.com> 11/01/18 11:31 AM >>>
>On 10/30/18 8:19 AM, Tian, Kevin wrote:
>>> From: Alexandru Stefan ISAILA [mailto:aisaila@bitdefender.com]
>>> Sent: Wednesday, October 24, 2018 5:19 PM
>>>
>>> The may_defer var was left with the older bool_t type. This patch
>>> changes the type to bool.
>>>
>>> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
>> 
>> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
>
>I think this trivial patch has all the acks it needs to go in?

It being trivial, I was considering ignoring the need for Paul's ack in this
case (which iirc has not been given so far), but since you ask - did you
check before asking? I can only reiterate that generally it is the submitter
who should chase acks, not any of the potential committers.


Jan



* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-11-01 13:58     ` Jan Beulich
@ 2018-11-01 14:03       ` Paul Durrant
  2018-11-01 14:04       ` Razvan Cojocaru
  1 sibling, 0 replies; 11+ messages in thread
From: Paul Durrant @ 2018-11-01 14:03 UTC (permalink / raw)
  To: 'Jan Beulich', rcojocaru
  Cc: Kevin Tian, tamas, Wei Liu, jun.nakajima, Andrew Cooper,
	suravee.suthikulpanit, aisaila, xen-devel, boris.ostrovsky,
	brian.woods

> -----Original Message-----
> From: Jan Beulich [mailto:jbeulich@suse.com]
> Sent: 01 November 2018 13:59
> To: rcojocaru@bitdefender.com
> Cc: brian.woods@amd.com; suravee.suthikulpanit@amd.com;
> aisaila@bitdefender.com; Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul
> Durrant <Paul.Durrant@citrix.com>; Wei Liu <wei.liu2@citrix.com>;
> jun.nakajima@intel.com; Kevin Tian <kevin.tian@intel.com>; xen-
> devel@lists.xenproject.org; boris.ostrovsky@oracle.com;
> tamas@tklengyel.com
> Subject: Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
> 
> >>> Razvan Cojocaru <rcojocaru@bitdefender.com> 11/01/18 11:31 AM >>>
> >On 10/30/18 8:19 AM, Tian, Kevin wrote:
> >>> From: Alexandru Stefan ISAILA [mailto:aisaila@bitdefender.com]
> >>> Sent: Wednesday, October 24, 2018 5:19 PM
> >>>
> >>> The may_defer var was left with the older bool_t type. This patch
> >>> changes the type to bool.
> >>>
> >>> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> >>
> >> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> >
> >I think this trivial patch has all the acks it needs to go in?
> 
> It being trivial, I was considering ignoring the need for Paul's ack in
> this case (which iirc has not been given so far), but since you ask - did
> you check before asking? I can only reiterate that generally it is the
> submitter who should chase acks, not any of the potential committers.
> 

Apologies. I missed this. Consider it...

Acked-by: Paul Durrant <paul.durrant@citrix.com>



* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-11-01 13:58     ` Jan Beulich
  2018-11-01 14:03       ` Paul Durrant
@ 2018-11-01 14:04       ` Razvan Cojocaru
  2018-11-01 14:10         ` Jan Beulich
  1 sibling, 1 reply; 11+ messages in thread
From: Razvan Cojocaru @ 2018-11-01 14:04 UTC (permalink / raw)
  To: Jan Beulich
  Cc: kevin.tian, tamas, wei.liu2, jun.nakajima, andrew.cooper3,
	paul.durrant, suravee.suthikulpanit, aisaila, xen-devel,
	boris.ostrovsky, brian.woods



On 11/1/18 3:58 PM, Jan Beulich wrote:
>>>> Razvan Cojocaru <rcojocaru@bitdefender.com> 11/01/18 11:31 AM >>>
>> On 10/30/18 8:19 AM, Tian, Kevin wrote:
>>>> From: Alexandru Stefan ISAILA [mailto:aisaila@bitdefender.com]
>>>> Sent: Wednesday, October 24, 2018 5:19 PM
>>>>
>>>> The may_defer var was left with the older bool_t type. This patch
>>>> changes the type to bool.
>>>>
>>>> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
>>>
>>> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
>>
>> I think this trivial patch has all the acks it needs to go in?
> 
> It being trivial, I was considering to ignore the need for Paul's ack in this
> case (which iirc has not been given so far), but since you ask - did you
> check before asking? I can only re-iterate that generally it is the submitter
> to chase acks, not any of the potential committers.

Sorry, I was not aware Paul's ack was still needed. By check, do you
mean take the recipients list (that I have from get_maintainer.pl) and
match it to the MAINTAINERS file to see if there's no ack from any
particular subcategory?


Thanks,
Razvan

* Re: [PATCH v1] x86/hvm: Clean up may_defer from hvm_* helpers
  2018-11-01 14:04       ` Razvan Cojocaru
@ 2018-11-01 14:10         ` Jan Beulich
  0 siblings, 0 replies; 11+ messages in thread
From: Jan Beulich @ 2018-11-01 14:10 UTC (permalink / raw)
  To: rcojocaru
  Cc: kevin.tian, tamas, wei.liu2, jun.nakajima, andrew.cooper3,
	paul.durrant, suravee.suthikulpanit, aisaila, xen-devel,
	boris.ostrovsky, brian.woods

>>> Razvan Cojocaru <rcojocaru@bitdefender.com> 11/01/18 3:04 PM >>>
>On 11/1/18 3:58 PM, Jan Beulich wrote:
>>>>> Razvan Cojocaru <rcojocaru@bitdefender.com> 11/01/18 11:31 AM >>>
>>> On 10/30/18 8:19 AM, Tian, Kevin wrote:
>>>>> From: Alexandru Stefan ISAILA [mailto:aisaila@bitdefender.com]
>>>>> Sent: Wednesday, October 24, 2018 5:19 PM
>>>>>
>>>>> The may_defer var was left with the older bool_t type. This patch
>>>>> changes the type to bool.
>>>>>
>>>>> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
>>>>
>>>> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
>>>
>>> I think this trivial patch has all the acks it needs to go in?
>> 
>> It being trivial, I was considering ignoring the need for Paul's ack in this
>> case (which iirc has not been given so far), but since you ask - did you
>> check before asking? I can only reiterate that generally it is the submitter
>> who should chase acks, not any of the potential committers.
>
>Sorry, I was not aware Paul's ack was still needed. By check, do you
>mean take the recipients list (that I have from get_maintainer.pl) and
>match it to the MAINTAINERS file to see if there's no ack from any
>particular subcategory?

"Check" means by whatever approach you prefer, but yes, ultimately it
would be to compare against what ./MAINTAINERS has to say for every
one of the files changed in a patch. You'd then notice that a basic x86
ack is missing too, but I'm certainly in the position to deal with that if
everything else was ready for committing.


Jan


