All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 00/10] x86: register renaming (part I)
@ 2016-12-20  9:55 Jan Beulich
  2016-12-20 10:36 ` [PATCH 01/10] x86/MSR: introduce MSR access split/fold helpers Jan Beulich
                   ` (10 more replies)
  0 siblings, 11 replies; 22+ messages in thread
From: Jan Beulich @ 2016-12-20  9:55 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

This is the first of three steps (as far as current plans go) to do away
with misleading register names (eax instead of rax).

01: x86/MSR: introduce MSR access split/fold helpers
02: x86/guest-walk: use unambiguous register names
03: x86/shadow: use unambiguous register names
04: x86/oprofile: use unambiguous register names
05: x86/HVM: use unambiguous register names
06: x86/HVMemul: use unambiguous register names
07: x86/SVM: use unambiguous register names
(VMX counterpart omitted for now, as I'll need to re-base)
08: x86/vm-event: use unambiguous register names
09: x86/traps: use unambiguous register names
10: x86/misc: use unambiguous register names

Signed-off-by: Jan Beulich <jbeulich@suse.com>


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 01/10] x86/MSR: introduce MSR access split/fold helpers
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
@ 2016-12-20 10:36 ` Jan Beulich
  2016-12-23  6:17   ` Tian, Kevin
  2016-12-20 10:36 ` [PATCH 02/10] x86/guest-walk: use unambiguous register names Jan Beulich
                   ` (9 subsequent siblings)
  10 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:36 UTC (permalink / raw)
  To: xen-devel
  Cc: Kevin Tian, Suravee Suthikulpanit, George Dunlap, Andrew Cooper,
	Jun Nakajima, Boris Ostrovsky

[-- Attachment #1: Type: text/plain, Size: 5705 bytes --]

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3695,12 +3695,9 @@ static uint64_t _hvm_rdtsc_intercept(voi
 
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
 {
-    uint64_t tsc = _hvm_rdtsc_intercept();
+    msr_split(regs, _hvm_rdtsc_intercept());
 
-    regs->eax = (uint32_t)tsc;
-    regs->edx = (uint32_t)(tsc >> 32);
-
-    HVMTRACE_2D(RDTSC, regs->eax, regs->edx);
+    HVMTRACE_2D(RDTSC, regs->_eax, regs->_edx);
 }
 
 int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1936,14 +1936,10 @@ static void svm_do_msr_access(struct cpu
 
         rc = hvm_msr_read_intercept(regs->_ecx, &msr_content);
         if ( rc == X86EMUL_OKAY )
-        {
-            regs->rax = (uint32_t)msr_content;
-            regs->rdx = (uint32_t)(msr_content >> 32);
-        }
+            msr_split(regs, msr_content);
     }
     else
-        rc = hvm_msr_write_intercept(regs->_ecx,
-                                     (regs->rdx << 32) | regs->_eax, 1);
+        rc = hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1);
 
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
@@ -2618,8 +2614,7 @@ void svm_vmexit_handler(struct cpu_user_
         if ( vmcb_get_cpl(vmcb) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         else if ( (inst_len = __get_instruction_length(v, INSTR_XSETBV)) &&
-                  hvm_handle_xsetbv(regs->ecx,
-                                    (regs->rdx << 32) | regs->_eax) == 0 )
+                  hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
             __update_guest_eip(regs, inst_len);
         break;
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3626,22 +3626,18 @@ void vmx_vmexit_handler(struct cpu_user_
     case EXIT_REASON_MSR_READ:
     {
         uint64_t msr_content;
-        if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY )
+        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
         {
-            regs->eax = (uint32_t)msr_content;
-            regs->edx = (uint32_t)(msr_content >> 32);
+            msr_split(regs, msr_content);
             update_guest_eip(); /* Safe: RDMSR */
         }
         break;
     }
+
     case EXIT_REASON_MSR_WRITE:
-    {
-        uint64_t msr_content;
-        msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
-        if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
+        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: WRMSR */
         break;
-    }
 
     case EXIT_REASON_VMXOFF:
         if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY )
@@ -3802,8 +3798,7 @@ void vmx_vmexit_handler(struct cpu_user_
         break;
 
     case EXIT_REASON_XSETBV:
-        if ( hvm_handle_xsetbv(regs->ecx,
-                               (regs->rdx << 32) | regs->_eax) == 0 )
+        if ( hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
             update_guest_eip(); /* Safe: XSETBV */
         break;
 
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2322,15 +2322,11 @@ int nvmx_n2_vmexit_handler(struct cpu_us
             nvcpu->nv_vmexit_pending = 1;
         else
         {
-            uint64_t tsc;
-
             /*
              * special handler is needed if L1 doesn't intercept rdtsc,
              * avoiding changing guest_tsc and messing up timekeeping in L1
              */
-            tsc = hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET);
-            regs->eax = (uint32_t)tsc;
-            regs->edx = (uint32_t)(tsc >> 32);
+            msr_split(regs, hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET));
             update_guest_eip();
 
             return 1;
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1918,13 +1918,10 @@ void pv_soft_rdtsc(struct vcpu *v, struc
 
     spin_unlock(&d->arch.vtsc_lock);
 
-    now = gtime_to_gtsc(d, now);
-
-    regs->eax = (uint32_t)now;
-    regs->edx = (uint32_t)(now >> 32);
+    msr_split(regs, gtime_to_gtsc(d, now));
 
     if ( rdtscp )
-         regs->ecx =
+         regs->rcx =
              (d->arch.tsc_mode == TSC_MODE_PVRDTSCP) ? d->arch.incarnation : 0;
 }
 
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3401,12 +3401,7 @@ if(rc) printk("%pv: %02x @ %08lx -> %d\n
             else if ( currd->arch.vtsc )
                 pv_soft_rdtsc(curr, regs, 0);
             else
-            {
-                uint64_t val = rdtsc();
-
-                regs->eax = (uint32_t)val;
-                regs->edx = (uint32_t)(val >> 32);
-            }
+                msr_split(regs, rdtsc());
         }
 
         if ( ctxt.ctxt.retire.singlestep )
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -71,6 +71,17 @@ static inline int wrmsr_safe(unsigned in
     return _rc;
 }
 
+static inline uint64_t msr_fold(const struct cpu_user_regs *regs)
+{
+    return (regs->rdx << 32) | regs->_eax;
+}
+
+static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)
+{
+    regs->rdx = val >> 32;
+    regs->rax = (uint32_t)val;
+}
+
 static inline uint64_t rdtsc(void)
 {
     uint32_t low, high;



[-- Attachment #2: x86-regnames-MSR.patch --]
[-- Type: text/plain, Size: 5753 bytes --]

x86/MSR: introduce MSR access split/fold helpers

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3695,12 +3695,9 @@ static uint64_t _hvm_rdtsc_intercept(voi
 
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
 {
-    uint64_t tsc = _hvm_rdtsc_intercept();
+    msr_split(regs, _hvm_rdtsc_intercept());
 
-    regs->eax = (uint32_t)tsc;
-    regs->edx = (uint32_t)(tsc >> 32);
-
-    HVMTRACE_2D(RDTSC, regs->eax, regs->edx);
+    HVMTRACE_2D(RDTSC, regs->_eax, regs->_edx);
 }
 
 int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1936,14 +1936,10 @@ static void svm_do_msr_access(struct cpu
 
         rc = hvm_msr_read_intercept(regs->_ecx, &msr_content);
         if ( rc == X86EMUL_OKAY )
-        {
-            regs->rax = (uint32_t)msr_content;
-            regs->rdx = (uint32_t)(msr_content >> 32);
-        }
+            msr_split(regs, msr_content);
     }
     else
-        rc = hvm_msr_write_intercept(regs->_ecx,
-                                     (regs->rdx << 32) | regs->_eax, 1);
+        rc = hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1);
 
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
@@ -2618,8 +2614,7 @@ void svm_vmexit_handler(struct cpu_user_
         if ( vmcb_get_cpl(vmcb) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         else if ( (inst_len = __get_instruction_length(v, INSTR_XSETBV)) &&
-                  hvm_handle_xsetbv(regs->ecx,
-                                    (regs->rdx << 32) | regs->_eax) == 0 )
+                  hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
             __update_guest_eip(regs, inst_len);
         break;
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3626,22 +3626,18 @@ void vmx_vmexit_handler(struct cpu_user_
     case EXIT_REASON_MSR_READ:
     {
         uint64_t msr_content;
-        if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY )
+        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
         {
-            regs->eax = (uint32_t)msr_content;
-            regs->edx = (uint32_t)(msr_content >> 32);
+            msr_split(regs, msr_content);
             update_guest_eip(); /* Safe: RDMSR */
         }
         break;
     }
+
     case EXIT_REASON_MSR_WRITE:
-    {
-        uint64_t msr_content;
-        msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
-        if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
+        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: WRMSR */
         break;
-    }
 
     case EXIT_REASON_VMXOFF:
         if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY )
@@ -3802,8 +3798,7 @@ void vmx_vmexit_handler(struct cpu_user_
         break;
 
     case EXIT_REASON_XSETBV:
-        if ( hvm_handle_xsetbv(regs->ecx,
-                               (regs->rdx << 32) | regs->_eax) == 0 )
+        if ( hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
             update_guest_eip(); /* Safe: XSETBV */
         break;
 
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2322,15 +2322,11 @@ int nvmx_n2_vmexit_handler(struct cpu_us
             nvcpu->nv_vmexit_pending = 1;
         else
         {
-            uint64_t tsc;
-
             /*
              * special handler is needed if L1 doesn't intercept rdtsc,
              * avoiding changing guest_tsc and messing up timekeeping in L1
              */
-            tsc = hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET);
-            regs->eax = (uint32_t)tsc;
-            regs->edx = (uint32_t)(tsc >> 32);
+            msr_split(regs, hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET));
             update_guest_eip();
 
             return 1;
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1918,13 +1918,10 @@ void pv_soft_rdtsc(struct vcpu *v, struc
 
     spin_unlock(&d->arch.vtsc_lock);
 
-    now = gtime_to_gtsc(d, now);
-
-    regs->eax = (uint32_t)now;
-    regs->edx = (uint32_t)(now >> 32);
+    msr_split(regs, gtime_to_gtsc(d, now));
 
     if ( rdtscp )
-         regs->ecx =
+         regs->rcx =
              (d->arch.tsc_mode == TSC_MODE_PVRDTSCP) ? d->arch.incarnation : 0;
 }
 
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3401,12 +3401,7 @@ if(rc) printk("%pv: %02x @ %08lx -> %d\n
             else if ( currd->arch.vtsc )
                 pv_soft_rdtsc(curr, regs, 0);
             else
-            {
-                uint64_t val = rdtsc();
-
-                regs->eax = (uint32_t)val;
-                regs->edx = (uint32_t)(val >> 32);
-            }
+                msr_split(regs, rdtsc());
         }
 
         if ( ctxt.ctxt.retire.singlestep )
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -71,6 +71,17 @@ static inline int wrmsr_safe(unsigned in
     return _rc;
 }
 
+static inline uint64_t msr_fold(const struct cpu_user_regs *regs)
+{
+    return (regs->rdx << 32) | regs->_eax;
+}
+
+static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)
+{
+    regs->rdx = val >> 32;
+    regs->rax = (uint32_t)val;
+}
+
 static inline uint64_t rdtsc(void)
 {
     uint32_t low, high;

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 02/10] x86/guest-walk: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
  2016-12-20 10:36 ` [PATCH 01/10] x86/MSR: introduce MSR access split/fold helpers Jan Beulich
@ 2016-12-20 10:36 ` Jan Beulich
  2016-12-28 11:18   ` George Dunlap
  2016-12-20 10:38 ` [PATCH 03/10] x86/shadow: " Jan Beulich
                   ` (8 subsequent siblings)
  10 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:36 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

[-- Attachment #1: Type: text/plain, Size: 773 bytes --]

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -196,7 +196,7 @@ guest_walk_tables(struct vcpu *v, struct
              *   - Page fault in kernel mode
              */
             smap = hvm_smap_enabled(v) &&
-                   ((hvm_get_cpl(v) == 3) || !(regs->eflags & X86_EFLAGS_AC));
+                   ((hvm_get_cpl(v) == 3) || !(regs->_eflags & X86_EFLAGS_AC));
             break;
         case SMAP_CHECK_ENABLED:
             smap = hvm_smap_enabled(v);




[-- Attachment #2: x86-regnames-gw.patch --]
[-- Type: text/plain, Size: 817 bytes --]

x86/guest-walk: use unambiguous register names

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -196,7 +196,7 @@ guest_walk_tables(struct vcpu *v, struct
              *   - Page fault in kernel mode
              */
             smap = hvm_smap_enabled(v) &&
-                   ((hvm_get_cpl(v) == 3) || !(regs->eflags & X86_EFLAGS_AC));
+                   ((hvm_get_cpl(v) == 3) || !(regs->_eflags & X86_EFLAGS_AC));
             break;
         case SMAP_CHECK_ENABLED:
             smap = hvm_smap_enabled(v);

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 03/10] x86/shadow: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
  2016-12-20 10:36 ` [PATCH 01/10] x86/MSR: introduce MSR access split/fold helpers Jan Beulich
  2016-12-20 10:36 ` [PATCH 02/10] x86/guest-walk: use unambiguous register names Jan Beulich
@ 2016-12-20 10:38 ` Jan Beulich
  2016-12-20 11:04   ` Tim Deegan
  2016-12-20 10:39 ` [PATCH 04/10] x86/oprofile: " Jan Beulich
                   ` (7 subsequent siblings)
  10 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:38 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Tim Deegan

[-- Attachment #1: Type: text/plain, Size: 2476 bytes --]

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -348,10 +348,10 @@ const struct x86_emulate_ops *shadow_ini
     }
 
     /* Attempt to prefetch whole instruction. */
-    sh_ctxt->insn_buf_eip = regs->eip;
+    sh_ctxt->insn_buf_eip = regs->rip;
     sh_ctxt->insn_buf_bytes =
         (!hvm_translate_linear_addr(
-            x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
+            x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
          !hvm_fetch_from_guest_linear(
              sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
@@ -374,18 +374,18 @@ void shadow_continue_emulation(struct sh
      * We don't refetch the segment bases, because we don't emulate
      * writes to segment registers
      */
-    diff = regs->eip - sh_ctxt->insn_buf_eip;
+    diff = regs->rip - sh_ctxt->insn_buf_eip;
     if ( diff > sh_ctxt->insn_buf_bytes )
     {
         /* Prefetch more bytes. */
         sh_ctxt->insn_buf_bytes =
             (!hvm_translate_linear_addr(
-                x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
+                x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
                 hvm_access_insn_fetch, sh_ctxt, &addr) &&
              !hvm_fetch_from_guest_linear(
                  sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
             ? sizeof(sh_ctxt->insn_buf) : 0;
-        sh_ctxt->insn_buf_eip = regs->eip;
+        sh_ctxt->insn_buf_eip = regs->rip;
     }
 }
 
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2872,7 +2872,7 @@ static int sh_page_fault(struct vcpu *v,
 #endif
 
     SHADOW_PRINTK("%pv va=%#lx err=%#x, rip=%lx\n",
-                  v, va, regs->error_code, regs->eip);
+                  v, va, regs->error_code, regs->rip);
 
     perfc_incr(shadow_fault);
 
@@ -3357,8 +3357,7 @@ static int sh_page_fault(struct vcpu *v,
         }
     }
 
-    SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
-                  (unsigned long)regs->eip, (unsigned long)regs->esp);
+    SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n", regs->rip, regs->rsp);
 
     emul_ops = shadow_init_emulation(&emul_ctxt, regs);
 




[-- Attachment #2: x86-regnames-shadow.patch --]
[-- Type: text/plain, Size: 2516 bytes --]

x86/shadow: use unambiguous register names

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -348,10 +348,10 @@ const struct x86_emulate_ops *shadow_ini
     }
 
     /* Attempt to prefetch whole instruction. */
-    sh_ctxt->insn_buf_eip = regs->eip;
+    sh_ctxt->insn_buf_eip = regs->rip;
     sh_ctxt->insn_buf_bytes =
         (!hvm_translate_linear_addr(
-            x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
+            x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
          !hvm_fetch_from_guest_linear(
              sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
@@ -374,18 +374,18 @@ void shadow_continue_emulation(struct sh
      * We don't refetch the segment bases, because we don't emulate
      * writes to segment registers
      */
-    diff = regs->eip - sh_ctxt->insn_buf_eip;
+    diff = regs->rip - sh_ctxt->insn_buf_eip;
     if ( diff > sh_ctxt->insn_buf_bytes )
     {
         /* Prefetch more bytes. */
         sh_ctxt->insn_buf_bytes =
             (!hvm_translate_linear_addr(
-                x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
+                x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
                 hvm_access_insn_fetch, sh_ctxt, &addr) &&
              !hvm_fetch_from_guest_linear(
                  sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
             ? sizeof(sh_ctxt->insn_buf) : 0;
-        sh_ctxt->insn_buf_eip = regs->eip;
+        sh_ctxt->insn_buf_eip = regs->rip;
     }
 }
 
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2872,7 +2872,7 @@ static int sh_page_fault(struct vcpu *v,
 #endif
 
     SHADOW_PRINTK("%pv va=%#lx err=%#x, rip=%lx\n",
-                  v, va, regs->error_code, regs->eip);
+                  v, va, regs->error_code, regs->rip);
 
     perfc_incr(shadow_fault);
 
@@ -3357,8 +3357,7 @@ static int sh_page_fault(struct vcpu *v,
         }
     }
 
-    SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
-                  (unsigned long)regs->eip, (unsigned long)regs->esp);
+    SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n", regs->rip, regs->rsp);
 
     emul_ops = shadow_init_emulation(&emul_ctxt, regs);
 

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 04/10] x86/oprofile: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
                   ` (2 preceding siblings ...)
  2016-12-20 10:38 ` [PATCH 03/10] x86/shadow: " Jan Beulich
@ 2016-12-20 10:39 ` Jan Beulich
  2016-12-20 10:39 ` [PATCH 05/10] x86/HVM: " Jan Beulich
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:39 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

[-- Attachment #1: Type: text/plain, Size: 2297 bytes --]

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/oprofile/backtrace.c
+++ b/xen/arch/x86/oprofile/backtrace.c
@@ -150,7 +150,7 @@ static int valid_hypervisor_stack(const
 void xenoprof_backtrace(struct vcpu *vcpu, const struct cpu_user_regs *regs,
 			unsigned long depth, int mode)
 {
-    const struct frame_head *head = (void *)regs->ebp;
+    const struct frame_head *head = (void *)regs->rbp;
 
     if (mode > 1) {
         while (depth-- && valid_hypervisor_stack(head, regs))
--- a/xen/arch/x86/oprofile/op_model_athlon.c
+++ b/xen/arch/x86/oprofile/op_model_athlon.c
@@ -316,22 +316,20 @@ static int athlon_check_ctrs(unsigned in
 	uint64_t msr_content;
 	int i;
 	int ovf = 0;
-	unsigned long eip = regs->eip;
+	unsigned long eip = regs->rip;
 	int mode = 0;
 	struct vcpu *v = current;
 	struct cpu_user_regs *guest_regs = guest_cpu_user_regs();
 	unsigned int const nr_ctrs = model->num_counters;
 
 	if (!guest_mode(regs) &&
-	    (regs->eip == (unsigned long)svm_stgi_label)) {
+	    (eip == (unsigned long)svm_stgi_label)) {
 		/* SVM guest was running when NMI occurred */
 		ASSERT(is_hvm_vcpu(v));
-		eip = guest_regs->eip;
+		eip = guest_regs->rip;
 		mode = xenoprofile_get_mode(v, guest_regs);
-	} else {
-		eip = regs->eip;
+	} else
 		mode = xenoprofile_get_mode(v, regs);
-	}
 
 	for (i = 0 ; i < nr_ctrs; ++i) {
 		CTR_READ(msr_content, msrs, i);
--- a/xen/arch/x86/oprofile/op_model_p4.c
+++ b/xen/arch/x86/oprofile/op_model_p4.c
@@ -617,7 +617,7 @@ static int p4_check_ctrs(unsigned int co
 	uint64_t msr_content;
 	int i;
 	int ovf = 0;
-	unsigned long eip = regs->eip;
+	unsigned long eip = regs->rip;
 	int mode = xenoprofile_get_mode(current, regs);
 
 	stag = get_stagger();
--- a/xen/arch/x86/oprofile/op_model_ppro.c
+++ b/xen/arch/x86/oprofile/op_model_ppro.c
@@ -135,7 +135,7 @@ static int ppro_check_ctrs(unsigned int
 	u64 val;
 	int i;
 	int ovf = 0;
-	unsigned long eip = regs->eip;
+	unsigned long eip = regs->rip;
 	int mode = xenoprofile_get_mode(current, regs);
 	struct arch_msr_pair *msrs_content = vcpu_vpmu(current)->context;
 




[-- Attachment #2: x86-regnames-oprofile.patch --]
[-- Type: text/plain, Size: 2339 bytes --]

x86/oprofile: use unambiguous register names

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/oprofile/backtrace.c
+++ b/xen/arch/x86/oprofile/backtrace.c
@@ -150,7 +150,7 @@ static int valid_hypervisor_stack(const
 void xenoprof_backtrace(struct vcpu *vcpu, const struct cpu_user_regs *regs,
 			unsigned long depth, int mode)
 {
-    const struct frame_head *head = (void *)regs->ebp;
+    const struct frame_head *head = (void *)regs->rbp;
 
     if (mode > 1) {
         while (depth-- && valid_hypervisor_stack(head, regs))
--- a/xen/arch/x86/oprofile/op_model_athlon.c
+++ b/xen/arch/x86/oprofile/op_model_athlon.c
@@ -316,22 +316,20 @@ static int athlon_check_ctrs(unsigned in
 	uint64_t msr_content;
 	int i;
 	int ovf = 0;
-	unsigned long eip = regs->eip;
+	unsigned long eip = regs->rip;
 	int mode = 0;
 	struct vcpu *v = current;
 	struct cpu_user_regs *guest_regs = guest_cpu_user_regs();
 	unsigned int const nr_ctrs = model->num_counters;
 
 	if (!guest_mode(regs) &&
-	    (regs->eip == (unsigned long)svm_stgi_label)) {
+	    (eip == (unsigned long)svm_stgi_label)) {
 		/* SVM guest was running when NMI occurred */
 		ASSERT(is_hvm_vcpu(v));
-		eip = guest_regs->eip;
+		eip = guest_regs->rip;
 		mode = xenoprofile_get_mode(v, guest_regs);
-	} else {
-		eip = regs->eip;
+	} else
 		mode = xenoprofile_get_mode(v, regs);
-	}
 
 	for (i = 0 ; i < nr_ctrs; ++i) {
 		CTR_READ(msr_content, msrs, i);
--- a/xen/arch/x86/oprofile/op_model_p4.c
+++ b/xen/arch/x86/oprofile/op_model_p4.c
@@ -617,7 +617,7 @@ static int p4_check_ctrs(unsigned int co
 	uint64_t msr_content;
 	int i;
 	int ovf = 0;
-	unsigned long eip = regs->eip;
+	unsigned long eip = regs->rip;
 	int mode = xenoprofile_get_mode(current, regs);
 
 	stag = get_stagger();
--- a/xen/arch/x86/oprofile/op_model_ppro.c
+++ b/xen/arch/x86/oprofile/op_model_ppro.c
@@ -135,7 +135,7 @@ static int ppro_check_ctrs(unsigned int
 	u64 val;
 	int i;
 	int ovf = 0;
-	unsigned long eip = regs->eip;
+	unsigned long eip = regs->rip;
 	int mode = xenoprofile_get_mode(current, regs);
 	struct arch_msr_pair *msrs_content = vcpu_vpmu(current)->context;
 

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 05/10] x86/HVM: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
                   ` (3 preceding siblings ...)
  2016-12-20 10:39 ` [PATCH 04/10] x86/oprofile: " Jan Beulich
@ 2016-12-20 10:39 ` Jan Beulich
  2016-12-20 17:39   ` Andrew Cooper
  2016-12-20 10:40 ` [PATCH 06/10] x86/HVMemul: " Jan Beulich
                   ` (5 subsequent siblings)
  10 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:39 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

[-- Attachment #1: Type: text/plain, Size: 10714 bytes --]

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -882,16 +882,16 @@ static int hvm_save_cpu_ctxt(struct doma
             ctxt.flags = XEN_X86_FPU_INITIALISED;
         }
 
-        ctxt.rax = v->arch.user_regs.eax;
-        ctxt.rbx = v->arch.user_regs.ebx;
-        ctxt.rcx = v->arch.user_regs.ecx;
-        ctxt.rdx = v->arch.user_regs.edx;
-        ctxt.rbp = v->arch.user_regs.ebp;
-        ctxt.rsi = v->arch.user_regs.esi;
-        ctxt.rdi = v->arch.user_regs.edi;
-        ctxt.rsp = v->arch.user_regs.esp;
-        ctxt.rip = v->arch.user_regs.eip;
-        ctxt.rflags = v->arch.user_regs.eflags;
+        ctxt.rax = v->arch.user_regs.rax;
+        ctxt.rbx = v->arch.user_regs.rbx;
+        ctxt.rcx = v->arch.user_regs.rcx;
+        ctxt.rdx = v->arch.user_regs.rdx;
+        ctxt.rbp = v->arch.user_regs.rbp;
+        ctxt.rsi = v->arch.user_regs.rsi;
+        ctxt.rdi = v->arch.user_regs.rdi;
+        ctxt.rsp = v->arch.user_regs.rsp;
+        ctxt.rip = v->arch.user_regs.rip;
+        ctxt.rflags = v->arch.user_regs.rflags;
         ctxt.r8  = v->arch.user_regs.r8;
         ctxt.r9  = v->arch.user_regs.r9;
         ctxt.r10 = v->arch.user_regs.r10;
@@ -1197,16 +1197,16 @@ static int hvm_load_cpu_ctxt(struct doma
     if ( xsave_area )
         xsave_area->xsave_hdr.xcomp_bv = 0;
 
-    v->arch.user_regs.eax = ctxt.rax;
-    v->arch.user_regs.ebx = ctxt.rbx;
-    v->arch.user_regs.ecx = ctxt.rcx;
-    v->arch.user_regs.edx = ctxt.rdx;
-    v->arch.user_regs.ebp = ctxt.rbp;
-    v->arch.user_regs.esi = ctxt.rsi;
-    v->arch.user_regs.edi = ctxt.rdi;
-    v->arch.user_regs.esp = ctxt.rsp;
-    v->arch.user_regs.eip = ctxt.rip;
-    v->arch.user_regs.eflags = ctxt.rflags | X86_EFLAGS_MBS;
+    v->arch.user_regs.rax = ctxt.rax;
+    v->arch.user_regs.rbx = ctxt.rbx;
+    v->arch.user_regs.rcx = ctxt.rcx;
+    v->arch.user_regs.rdx = ctxt.rdx;
+    v->arch.user_regs.rbp = ctxt.rbp;
+    v->arch.user_regs.rsi = ctxt.rsi;
+    v->arch.user_regs.rdi = ctxt.rdi;
+    v->arch.user_regs.rsp = ctxt.rsp;
+    v->arch.user_regs.rip = ctxt.rip;
+    v->arch.user_regs.rflags = ctxt.rflags | X86_EFLAGS_MBS;
     v->arch.user_regs.r8  = ctxt.r8;
     v->arch.user_regs.r9  = ctxt.r9;
     v->arch.user_regs.r10 = ctxt.r10;
@@ -1658,7 +1658,7 @@ void hvm_vcpu_down(struct vcpu *v)
     }
 }
 
-void hvm_hlt(unsigned long rflags)
+void hvm_hlt(unsigned int eflags)
 {
     struct vcpu *curr = current;
 
@@ -1670,7 +1670,7 @@ void hvm_hlt(unsigned long rflags)
      * want to shut down. In a real processor, NMIs are the only way to break
      * out of this.
      */
-    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
+    if ( unlikely(!(eflags & X86_EFLAGS_IF)) )
         return hvm_vcpu_down(curr);
 
     do_sched_op(SCHEDOP_block, guest_handle_from_ptr(NULL, void));
@@ -2901,7 +2901,7 @@ void hvm_task_switch(
     struct segment_register gdt, tr, prev_tr, segr;
     struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
     bool_t otd_writable, ntd_writable;
-    unsigned long eflags;
+    unsigned int eflags;
     pagefault_info_t pfinfo;
     int exn_raised, rc;
     struct {
@@ -2975,20 +2975,20 @@ void hvm_task_switch(
     if ( rc != HVMCOPY_okay )
         goto out;
 
-    eflags = regs->eflags;
+    eflags = regs->_eflags;
     if ( taskswitch_reason == TSW_iret )
         eflags &= ~X86_EFLAGS_NT;
 
-    tss.eip    = regs->eip;
+    tss.eip    = regs->_eip;
     tss.eflags = eflags;
-    tss.eax    = regs->eax;
-    tss.ecx    = regs->ecx;
-    tss.edx    = regs->edx;
-    tss.ebx    = regs->ebx;
-    tss.esp    = regs->esp;
-    tss.ebp    = regs->ebp;
-    tss.esi    = regs->esi;
-    tss.edi    = regs->edi;
+    tss.eax    = regs->_eax;
+    tss.ecx    = regs->_ecx;
+    tss.edx    = regs->_edx;
+    tss.ebx    = regs->_ebx;
+    tss.esp    = regs->_esp;
+    tss.ebp    = regs->_ebp;
+    tss.esi    = regs->_esi;
+    tss.edi    = regs->_edi;
 
     hvm_get_segment_register(v, x86_seg_es, &segr);
     tss.es = segr.sel;
@@ -3032,16 +3032,16 @@ void hvm_task_switch(
     if ( hvm_set_cr3(tss.cr3, 1) )
         goto out;
 
-    regs->eip    = tss.eip;
-    regs->eflags = tss.eflags | 2;
-    regs->eax    = tss.eax;
-    regs->ecx    = tss.ecx;
-    regs->edx    = tss.edx;
-    regs->ebx    = tss.ebx;
-    regs->esp    = tss.esp;
-    regs->ebp    = tss.ebp;
-    regs->esi    = tss.esi;
-    regs->edi    = tss.edi;
+    regs->rip    = tss.eip;
+    regs->rflags = tss.eflags | 2;
+    regs->rax    = tss.eax;
+    regs->rcx    = tss.ecx;
+    regs->rdx    = tss.edx;
+    regs->rbx    = tss.ebx;
+    regs->rsp    = tss.esp;
+    regs->rbp    = tss.ebp;
+    regs->rsi    = tss.esi;
+    regs->rdi    = tss.edi;
 
     exn_raised = 0;
     if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
@@ -3054,7 +3054,7 @@ void hvm_task_switch(
 
     if ( taskswitch_reason == TSW_call_or_int )
     {
-        regs->eflags |= X86_EFLAGS_NT;
+        regs->_eflags |= X86_EFLAGS_NT;
         tss.back_link = prev_tr.sel;
 
         rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
@@ -4012,7 +4012,7 @@ void hvm_ud_intercept(struct cpu_user_re
         unsigned long addr;
         char sig[5]; /* ud2; .ascii "xen" */
 
-        if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->eip,
+        if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
                                         sizeof(sig), hvm_access_insn_fetch,
                                         (hvm_long_mode_enabled(cur) &&
                                          cs->attr.fields.l) ? 64 :
@@ -4021,12 +4021,12 @@ void hvm_ud_intercept(struct cpu_user_re
                                           walk, NULL) == HVMCOPY_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
         {
-            regs->eip += sizeof(sig);
-            regs->eflags &= ~X86_EFLAGS_RF;
+            regs->rip += sizeof(sig);
+            regs->_eflags &= ~X86_EFLAGS_RF;
 
             /* Zero the upper 32 bits of %rip if not in 64bit mode. */
             if ( !(hvm_long_mode_enabled(cur) && cs->attr.fields.l) )
-                regs->eip = regs->_eip;
+                regs->rip = regs->_eip;
 
             add_taint(TAINT_HVM_FEP);
         }
@@ -4062,7 +4062,7 @@ enum hvm_intblk hvm_interrupt_blocked(st
     }
 
     if ( (intack.source != hvm_intsrc_nmi) &&
-         !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
+         !(guest_cpu_user_regs()->_eflags & X86_EFLAGS_IF) )
         return hvm_intblk_rflags_ie;
 
     intr_shadow = hvm_funcs.get_interrupt_shadow(v);
@@ -4255,7 +4255,7 @@ int hvm_do_hypercall(struct cpu_user_reg
         if ( unlikely(hvm_get_cpl(curr)) )
         {
     default:
-            regs->eax = -EPERM;
+            regs->rax = -EPERM;
             return HVM_HCALL_completed;
         }
     case 0:
@@ -4271,7 +4271,7 @@ int hvm_do_hypercall(struct cpu_user_reg
     if ( (eax >= ARRAY_SIZE(hvm_hypercall_table)) ||
          !hvm_hypercall_table[eax].native )
     {
-        regs->eax = -ENOSYS;
+        regs->rax = -ENOSYS;
         return HVM_HCALL_completed;
     }
 
@@ -4317,9 +4317,9 @@ int hvm_do_hypercall(struct cpu_user_reg
             case 6: regs->r9  = 0xdeadbeefdeadf00dUL;
             case 5: regs->r8  = 0xdeadbeefdeadf00dUL;
             case 4: regs->r10 = 0xdeadbeefdeadf00dUL;
-            case 3: regs->edx = 0xdeadbeefdeadf00dUL;
-            case 2: regs->esi = 0xdeadbeefdeadf00dUL;
-            case 1: regs->edi = 0xdeadbeefdeadf00dUL;
+            case 3: regs->rdx = 0xdeadbeefdeadf00dUL;
+            case 2: regs->rsi = 0xdeadbeefdeadf00dUL;
+            case 1: regs->rdi = 0xdeadbeefdeadf00dUL;
             }
         }
 #endif
@@ -4349,8 +4349,8 @@ int hvm_do_hypercall(struct cpu_user_reg
         }
 #endif
 
-        regs->_eax = hvm_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi,
-                                                     ebp);
+        regs->rax = hvm_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi,
+                                                    ebp);
 
 #ifndef NDEBUG
         if ( !curr->arch.hvm_vcpu.hcall_preempted )
@@ -4358,19 +4358,18 @@ int hvm_do_hypercall(struct cpu_user_reg
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].compat )
             {
-            case 6: regs->ebp = 0xdeadf00d;
-            case 5: regs->edi = 0xdeadf00d;
-            case 4: regs->esi = 0xdeadf00d;
-            case 3: regs->edx = 0xdeadf00d;
-            case 2: regs->ecx = 0xdeadf00d;
-            case 1: regs->ebx = 0xdeadf00d;
+            case 6: regs->rbp = 0xdeadf00d;
+            case 5: regs->rdi = 0xdeadf00d;
+            case 4: regs->rsi = 0xdeadf00d;
+            case 3: regs->rdx = 0xdeadf00d;
+            case 2: regs->rcx = 0xdeadf00d;
+            case 1: regs->rbx = 0xdeadf00d;
             }
         }
 #endif
     }
 
-    HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu -> %lx",
-                eax, (unsigned long)regs->eax);
+    HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu -> %lx", eax, regs->rax);
 
     if ( curr->arch.hvm_vcpu.hcall_preempted )
         return HVM_HCALL_preempted;
@@ -4490,9 +4489,9 @@ void hvm_vcpu_reset_state(struct vcpu *v
 
     v->arch.vgc_flags = VGCF_online;
     memset(&v->arch.user_regs, 0, sizeof(v->arch.user_regs));
-    v->arch.user_regs.eflags = X86_EFLAGS_MBS;
-    v->arch.user_regs.edx = 0x00000f00;
-    v->arch.user_regs.eip = ip;
+    v->arch.user_regs.rflags = X86_EFLAGS_MBS;
+    v->arch.user_regs.rdx = 0x00000f00;
+    v->arch.user_regs.rip = ip;
     memset(&v->arch.debugreg, 0, sizeof(v->arch.debugreg));
 
     v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -108,7 +108,7 @@ enum hvm_copy_result hvm_fetch_from_gues
 #define HVM_HCALL_invalidate 2 /* invalidate ioemu-dm memory cache        */
 int hvm_do_hypercall(struct cpu_user_regs *pregs);
 
-void hvm_hlt(unsigned long rflags);
+void hvm_hlt(unsigned int eflags);
 void hvm_triple_fault(void);
 
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs);



[-- Attachment #2: x86-regnames-HVM.patch --]
[-- Type: text/plain, Size: 10753 bytes --]

x86/HVM: use unambiguous register names

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -882,16 +882,16 @@ static int hvm_save_cpu_ctxt(struct doma
             ctxt.flags = XEN_X86_FPU_INITIALISED;
         }
 
-        ctxt.rax = v->arch.user_regs.eax;
-        ctxt.rbx = v->arch.user_regs.ebx;
-        ctxt.rcx = v->arch.user_regs.ecx;
-        ctxt.rdx = v->arch.user_regs.edx;
-        ctxt.rbp = v->arch.user_regs.ebp;
-        ctxt.rsi = v->arch.user_regs.esi;
-        ctxt.rdi = v->arch.user_regs.edi;
-        ctxt.rsp = v->arch.user_regs.esp;
-        ctxt.rip = v->arch.user_regs.eip;
-        ctxt.rflags = v->arch.user_regs.eflags;
+        ctxt.rax = v->arch.user_regs.rax;
+        ctxt.rbx = v->arch.user_regs.rbx;
+        ctxt.rcx = v->arch.user_regs.rcx;
+        ctxt.rdx = v->arch.user_regs.rdx;
+        ctxt.rbp = v->arch.user_regs.rbp;
+        ctxt.rsi = v->arch.user_regs.rsi;
+        ctxt.rdi = v->arch.user_regs.rdi;
+        ctxt.rsp = v->arch.user_regs.rsp;
+        ctxt.rip = v->arch.user_regs.rip;
+        ctxt.rflags = v->arch.user_regs.rflags;
         ctxt.r8  = v->arch.user_regs.r8;
         ctxt.r9  = v->arch.user_regs.r9;
         ctxt.r10 = v->arch.user_regs.r10;
@@ -1197,16 +1197,16 @@ static int hvm_load_cpu_ctxt(struct doma
     if ( xsave_area )
         xsave_area->xsave_hdr.xcomp_bv = 0;
 
-    v->arch.user_regs.eax = ctxt.rax;
-    v->arch.user_regs.ebx = ctxt.rbx;
-    v->arch.user_regs.ecx = ctxt.rcx;
-    v->arch.user_regs.edx = ctxt.rdx;
-    v->arch.user_regs.ebp = ctxt.rbp;
-    v->arch.user_regs.esi = ctxt.rsi;
-    v->arch.user_regs.edi = ctxt.rdi;
-    v->arch.user_regs.esp = ctxt.rsp;
-    v->arch.user_regs.eip = ctxt.rip;
-    v->arch.user_regs.eflags = ctxt.rflags | X86_EFLAGS_MBS;
+    v->arch.user_regs.rax = ctxt.rax;
+    v->arch.user_regs.rbx = ctxt.rbx;
+    v->arch.user_regs.rcx = ctxt.rcx;
+    v->arch.user_regs.rdx = ctxt.rdx;
+    v->arch.user_regs.rbp = ctxt.rbp;
+    v->arch.user_regs.rsi = ctxt.rsi;
+    v->arch.user_regs.rdi = ctxt.rdi;
+    v->arch.user_regs.rsp = ctxt.rsp;
+    v->arch.user_regs.rip = ctxt.rip;
+    v->arch.user_regs.rflags = ctxt.rflags | X86_EFLAGS_MBS;
     v->arch.user_regs.r8  = ctxt.r8;
     v->arch.user_regs.r9  = ctxt.r9;
     v->arch.user_regs.r10 = ctxt.r10;
@@ -1658,7 +1658,7 @@ void hvm_vcpu_down(struct vcpu *v)
     }
 }
 
-void hvm_hlt(unsigned long rflags)
+void hvm_hlt(unsigned int eflags)
 {
     struct vcpu *curr = current;
 
@@ -1670,7 +1670,7 @@ void hvm_hlt(unsigned long rflags)
      * want to shut down. In a real processor, NMIs are the only way to break
      * out of this.
      */
-    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
+    if ( unlikely(!(eflags & X86_EFLAGS_IF)) )
         return hvm_vcpu_down(curr);
 
     do_sched_op(SCHEDOP_block, guest_handle_from_ptr(NULL, void));
@@ -2901,7 +2901,7 @@ void hvm_task_switch(
     struct segment_register gdt, tr, prev_tr, segr;
     struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
     bool_t otd_writable, ntd_writable;
-    unsigned long eflags;
+    unsigned int eflags;
     pagefault_info_t pfinfo;
     int exn_raised, rc;
     struct {
@@ -2975,20 +2975,20 @@ void hvm_task_switch(
     if ( rc != HVMCOPY_okay )
         goto out;
 
-    eflags = regs->eflags;
+    eflags = regs->_eflags;
     if ( taskswitch_reason == TSW_iret )
         eflags &= ~X86_EFLAGS_NT;
 
-    tss.eip    = regs->eip;
+    tss.eip    = regs->_eip;
     tss.eflags = eflags;
-    tss.eax    = regs->eax;
-    tss.ecx    = regs->ecx;
-    tss.edx    = regs->edx;
-    tss.ebx    = regs->ebx;
-    tss.esp    = regs->esp;
-    tss.ebp    = regs->ebp;
-    tss.esi    = regs->esi;
-    tss.edi    = regs->edi;
+    tss.eax    = regs->_eax;
+    tss.ecx    = regs->_ecx;
+    tss.edx    = regs->_edx;
+    tss.ebx    = regs->_ebx;
+    tss.esp    = regs->_esp;
+    tss.ebp    = regs->_ebp;
+    tss.esi    = regs->_esi;
+    tss.edi    = regs->_edi;
 
     hvm_get_segment_register(v, x86_seg_es, &segr);
     tss.es = segr.sel;
@@ -3032,16 +3032,16 @@ void hvm_task_switch(
     if ( hvm_set_cr3(tss.cr3, 1) )
         goto out;
 
-    regs->eip    = tss.eip;
-    regs->eflags = tss.eflags | 2;
-    regs->eax    = tss.eax;
-    regs->ecx    = tss.ecx;
-    regs->edx    = tss.edx;
-    regs->ebx    = tss.ebx;
-    regs->esp    = tss.esp;
-    regs->ebp    = tss.ebp;
-    regs->esi    = tss.esi;
-    regs->edi    = tss.edi;
+    regs->rip    = tss.eip;
+    regs->rflags = tss.eflags | 2;
+    regs->rax    = tss.eax;
+    regs->rcx    = tss.ecx;
+    regs->rdx    = tss.edx;
+    regs->rbx    = tss.ebx;
+    regs->rsp    = tss.esp;
+    regs->rbp    = tss.ebp;
+    regs->rsi    = tss.esi;
+    regs->rdi    = tss.edi;
 
     exn_raised = 0;
     if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
@@ -3054,7 +3054,7 @@ void hvm_task_switch(
 
     if ( taskswitch_reason == TSW_call_or_int )
     {
-        regs->eflags |= X86_EFLAGS_NT;
+        regs->_eflags |= X86_EFLAGS_NT;
         tss.back_link = prev_tr.sel;
 
         rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
@@ -4012,7 +4012,7 @@ void hvm_ud_intercept(struct cpu_user_re
         unsigned long addr;
         char sig[5]; /* ud2; .ascii "xen" */
 
-        if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->eip,
+        if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
                                         sizeof(sig), hvm_access_insn_fetch,
                                         (hvm_long_mode_enabled(cur) &&
                                          cs->attr.fields.l) ? 64 :
@@ -4021,12 +4021,12 @@ void hvm_ud_intercept(struct cpu_user_re
                                           walk, NULL) == HVMCOPY_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
         {
-            regs->eip += sizeof(sig);
-            regs->eflags &= ~X86_EFLAGS_RF;
+            regs->rip += sizeof(sig);
+            regs->_eflags &= ~X86_EFLAGS_RF;
 
             /* Zero the upper 32 bits of %rip if not in 64bit mode. */
             if ( !(hvm_long_mode_enabled(cur) && cs->attr.fields.l) )
-                regs->eip = regs->_eip;
+                regs->rip = regs->_eip;
 
             add_taint(TAINT_HVM_FEP);
         }
@@ -4062,7 +4062,7 @@ enum hvm_intblk hvm_interrupt_blocked(st
     }
 
     if ( (intack.source != hvm_intsrc_nmi) &&
-         !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
+         !(guest_cpu_user_regs()->_eflags & X86_EFLAGS_IF) )
         return hvm_intblk_rflags_ie;
 
     intr_shadow = hvm_funcs.get_interrupt_shadow(v);
@@ -4255,7 +4255,7 @@ int hvm_do_hypercall(struct cpu_user_reg
         if ( unlikely(hvm_get_cpl(curr)) )
         {
     default:
-            regs->eax = -EPERM;
+            regs->rax = -EPERM;
             return HVM_HCALL_completed;
         }
     case 0:
@@ -4271,7 +4271,7 @@ int hvm_do_hypercall(struct cpu_user_reg
     if ( (eax >= ARRAY_SIZE(hvm_hypercall_table)) ||
          !hvm_hypercall_table[eax].native )
     {
-        regs->eax = -ENOSYS;
+        regs->rax = -ENOSYS;
         return HVM_HCALL_completed;
     }
 
@@ -4317,9 +4317,9 @@ int hvm_do_hypercall(struct cpu_user_reg
             case 6: regs->r9  = 0xdeadbeefdeadf00dUL;
             case 5: regs->r8  = 0xdeadbeefdeadf00dUL;
             case 4: regs->r10 = 0xdeadbeefdeadf00dUL;
-            case 3: regs->edx = 0xdeadbeefdeadf00dUL;
-            case 2: regs->esi = 0xdeadbeefdeadf00dUL;
-            case 1: regs->edi = 0xdeadbeefdeadf00dUL;
+            case 3: regs->rdx = 0xdeadbeefdeadf00dUL;
+            case 2: regs->rsi = 0xdeadbeefdeadf00dUL;
+            case 1: regs->rdi = 0xdeadbeefdeadf00dUL;
             }
         }
 #endif
@@ -4349,8 +4349,8 @@ int hvm_do_hypercall(struct cpu_user_reg
         }
 #endif
 
-        regs->_eax = hvm_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi,
-                                                     ebp);
+        regs->rax = hvm_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi,
+                                                    ebp);
 
 #ifndef NDEBUG
         if ( !curr->arch.hvm_vcpu.hcall_preempted )
@@ -4358,19 +4358,18 @@ int hvm_do_hypercall(struct cpu_user_reg
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].compat )
             {
-            case 6: regs->ebp = 0xdeadf00d;
-            case 5: regs->edi = 0xdeadf00d;
-            case 4: regs->esi = 0xdeadf00d;
-            case 3: regs->edx = 0xdeadf00d;
-            case 2: regs->ecx = 0xdeadf00d;
-            case 1: regs->ebx = 0xdeadf00d;
+            case 6: regs->rbp = 0xdeadf00d;
+            case 5: regs->rdi = 0xdeadf00d;
+            case 4: regs->rsi = 0xdeadf00d;
+            case 3: regs->rdx = 0xdeadf00d;
+            case 2: regs->rcx = 0xdeadf00d;
+            case 1: regs->rbx = 0xdeadf00d;
             }
         }
 #endif
     }
 
-    HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu -> %lx",
-                eax, (unsigned long)regs->eax);
+    HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu -> %lx", eax, regs->rax);
 
     if ( curr->arch.hvm_vcpu.hcall_preempted )
         return HVM_HCALL_preempted;
@@ -4490,9 +4489,9 @@ void hvm_vcpu_reset_state(struct vcpu *v
 
     v->arch.vgc_flags = VGCF_online;
     memset(&v->arch.user_regs, 0, sizeof(v->arch.user_regs));
-    v->arch.user_regs.eflags = X86_EFLAGS_MBS;
-    v->arch.user_regs.edx = 0x00000f00;
-    v->arch.user_regs.eip = ip;
+    v->arch.user_regs.rflags = X86_EFLAGS_MBS;
+    v->arch.user_regs.rdx = 0x00000f00;
+    v->arch.user_regs.rip = ip;
     memset(&v->arch.debugreg, 0, sizeof(v->arch.debugreg));
 
     v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -108,7 +108,7 @@ enum hvm_copy_result hvm_fetch_from_gues
 #define HVM_HCALL_invalidate 2 /* invalidate ioemu-dm memory cache        */
 int hvm_do_hypercall(struct cpu_user_regs *pregs);
 
-void hvm_hlt(unsigned long rflags);
+void hvm_hlt(unsigned int eflags);
 void hvm_triple_fault(void);
 
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs);

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 06/10] x86/HVMemul: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
                   ` (4 preceding siblings ...)
  2016-12-20 10:39 ` [PATCH 05/10] x86/HVM: " Jan Beulich
@ 2016-12-20 10:40 ` Jan Beulich
  2016-12-20 10:41 ` [PATCH 07/10] x86/SVM: " Jan Beulich
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:40 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Paul Durrant

[-- Attachment #1: Type: text/plain, Size: 3565 bytes --]

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -442,7 +442,7 @@ static int hvmemul_linear_to_phys(
     }
 
     /* Reverse mode if this is a backwards multi-iteration string operation. */
-    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
+    reverse = (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1);
 
     if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
     {
@@ -539,7 +539,7 @@ static int hvmemul_virtual_to_linear(
     if ( IS_ERR(reg) )
         return -PTR_ERR(reg);
 
-    if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
+    if ( (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1) )
     {
         /*
          * x86_emulate() clips the repetition count to ensure we don't wrap
@@ -1074,7 +1074,7 @@ static int hvmemul_rep_ins(
         return X86EMUL_UNHANDLEABLE;
 
     return hvmemul_do_pio_addr(src_port, reps, bytes_per_rep, IOREQ_READ,
-                               !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
+                               !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
 }
 
 static int hvmemul_rep_outs_set_context(
@@ -1143,7 +1143,7 @@ static int hvmemul_rep_outs(
         return X86EMUL_UNHANDLEABLE;
 
     return hvmemul_do_pio_addr(dst_port, reps, bytes_per_rep, IOREQ_WRITE,
-                               !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
+                               !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
 }
 
 static int hvmemul_rep_movs(
@@ -1162,7 +1162,7 @@ static int hvmemul_rep_movs(
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
     p2m_type_t sp2mt, dp2mt;
-    int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
+    int rc, df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
     char *buf;
 
     rc = hvmemul_virtual_to_linear(
@@ -1316,7 +1316,7 @@ static int hvmemul_rep_stos(
     unsigned long addr, bytes;
     paddr_t gpa;
     p2m_type_t p2mt;
-    bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
+    bool_t df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
     int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
                                        hvm_access_write, hvmemul_ctxt, &addr);
 
@@ -1766,7 +1766,7 @@ static int _hvm_emulate_one(struct hvm_e
     if ( hvmemul_ctxt->ctxt.retire.hlt &&
          !hvm_local_events_need_delivery(curr) )
     {
-        hvm_hlt(regs->eflags);
+        hvm_hlt(regs->_eflags);
     }
 
     return X86EMUL_OKAY;
@@ -1930,7 +1930,7 @@ void hvm_emulate_init_per_insn(
     if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
         pfec |= PFEC_user_mode;
 
-    hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->eip;
+    hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->rip;
     if ( !insn_bytes )
     {
         hvmemul_ctxt->insn_buf_bytes =
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -136,7 +136,7 @@ int handle_pio(uint16_t port, unsigned i
     ASSERT((size - 1) < 4 && size != 3);
 
     if ( dir == IOREQ_WRITE )
-        data = guest_cpu_user_regs()->eax;
+        data = guest_cpu_user_regs()->_eax;
 
     rc = hvmemul_do_pio_buffer(port, size, dir, &data);
 




[-- Attachment #2: x86-regnames-HVM-emul.patch --]
[-- Type: text/plain, Size: 3606 bytes --]

x86/HVMemul: use unambiguous register names

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -442,7 +442,7 @@ static int hvmemul_linear_to_phys(
     }
 
     /* Reverse mode if this is a backwards multi-iteration string operation. */
-    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
+    reverse = (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1);
 
     if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
     {
@@ -539,7 +539,7 @@ static int hvmemul_virtual_to_linear(
     if ( IS_ERR(reg) )
         return -PTR_ERR(reg);
 
-    if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
+    if ( (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1) )
     {
         /*
          * x86_emulate() clips the repetition count to ensure we don't wrap
@@ -1074,7 +1074,7 @@ static int hvmemul_rep_ins(
         return X86EMUL_UNHANDLEABLE;
 
     return hvmemul_do_pio_addr(src_port, reps, bytes_per_rep, IOREQ_READ,
-                               !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
+                               !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
 }
 
 static int hvmemul_rep_outs_set_context(
@@ -1143,7 +1143,7 @@ static int hvmemul_rep_outs(
         return X86EMUL_UNHANDLEABLE;
 
     return hvmemul_do_pio_addr(dst_port, reps, bytes_per_rep, IOREQ_WRITE,
-                               !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
+                               !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
 }
 
 static int hvmemul_rep_movs(
@@ -1162,7 +1162,7 @@ static int hvmemul_rep_movs(
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
     p2m_type_t sp2mt, dp2mt;
-    int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
+    int rc, df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
     char *buf;
 
     rc = hvmemul_virtual_to_linear(
@@ -1316,7 +1316,7 @@ static int hvmemul_rep_stos(
     unsigned long addr, bytes;
     paddr_t gpa;
     p2m_type_t p2mt;
-    bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
+    bool_t df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
     int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
                                        hvm_access_write, hvmemul_ctxt, &addr);
 
@@ -1766,7 +1766,7 @@ static int _hvm_emulate_one(struct hvm_e
     if ( hvmemul_ctxt->ctxt.retire.hlt &&
          !hvm_local_events_need_delivery(curr) )
     {
-        hvm_hlt(regs->eflags);
+        hvm_hlt(regs->_eflags);
     }
 
     return X86EMUL_OKAY;
@@ -1930,7 +1930,7 @@ void hvm_emulate_init_per_insn(
     if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
         pfec |= PFEC_user_mode;
 
-    hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->eip;
+    hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->rip;
     if ( !insn_bytes )
     {
         hvmemul_ctxt->insn_buf_bytes =
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -136,7 +136,7 @@ int handle_pio(uint16_t port, unsigned i
     ASSERT((size - 1) < 4 && size != 3);
 
     if ( dir == IOREQ_WRITE )
-        data = guest_cpu_user_regs()->eax;
+        data = guest_cpu_user_regs()->_eax;
 
     rc = hvmemul_do_pio_buffer(port, size, dir, &data);
 

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 07/10] x86/SVM: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
                   ` (5 preceding siblings ...)
  2016-12-20 10:40 ` [PATCH 06/10] x86/HVMemul: " Jan Beulich
@ 2016-12-20 10:41 ` Jan Beulich
  2016-12-26  5:46   ` Suravee Suthikulpanit
  2016-12-20 10:42 ` [PATCH 08/10] x86/vm-event: " Jan Beulich
                   ` (3 subsequent siblings)
  10 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:41 UTC (permalink / raw)
  To: xen-devel
  Cc: George Dunlap, Andrew Cooper, Boris Ostrovsky, Suravee Suthikulpanit

[-- Attachment #1: Type: text/plain, Size: 6962 bytes --]

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -322,10 +322,10 @@ static int nsvm_vcpu_hostrestore(struct
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
 
-    regs->eax = n1vmcb->rax;
-    regs->esp = n1vmcb->rsp;
-    regs->eip = n1vmcb->rip;
-    regs->eflags = n1vmcb->rflags;
+    regs->rax = n1vmcb->rax;
+    regs->rsp = n1vmcb->rsp;
+    regs->rip = n1vmcb->rip;
+    regs->rflags = n1vmcb->rflags;
     n1vmcb->_dr7 = 0; /* disable all breakpoints */
     n1vmcb->_cpl = 0;
 
@@ -653,10 +653,10 @@ static int nsvm_vmcb_prepare4vmrun(struc
     }
 
     /* Switch guest registers to l2 guest */
-    regs->eax = ns_vmcb->rax;
-    regs->eip = ns_vmcb->rip;
-    regs->esp = ns_vmcb->rsp;
-    regs->eflags = ns_vmcb->rflags;
+    regs->rax = ns_vmcb->rax;
+    regs->rip = ns_vmcb->rip;
+    regs->rsp = ns_vmcb->rsp;
+    regs->rflags = ns_vmcb->rflags;
 
 #undef vcleanbit_set
     return 0;
@@ -975,7 +975,7 @@ nsvm_vmcb_guest_intercepts_exitcode(stru
             break;
         ns_vmcb = nv->nv_vvmcx;
         vmexits = nsvm_vmcb_guest_intercepts_msr(svm->ns_cached_msrpm,
-            regs->ecx, ns_vmcb->exitinfo1 != 0);
+            regs->_ecx, ns_vmcb->exitinfo1 != 0);
         if (vmexits == NESTEDHVM_VMEXIT_HOST)
             return 0;
         break;
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -110,12 +110,12 @@ void __update_guest_eip(struct cpu_user_
 
     ASSERT(regs == guest_cpu_user_regs());
 
-    regs->eip += inst_len;
-    regs->eflags &= ~X86_EFLAGS_RF;
+    regs->rip += inst_len;
+    regs->_eflags &= ~X86_EFLAGS_RF;
 
     curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
 
-    if ( regs->eflags & X86_EFLAGS_TF )
+    if ( regs->_eflags & X86_EFLAGS_TF )
         hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
 }
 
@@ -520,7 +520,7 @@ static int svm_guest_x86_mode(struct vcp
 
     if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
         return 0;
-    if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
+    if ( unlikely(guest_cpu_user_regs()->_eflags & X86_EFLAGS_VM) )
         return 1;
     if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
         return 8;
@@ -1226,7 +1226,7 @@ static void svm_inject_event(const struc
     switch ( _event.vector )
     {
     case TRAP_debug:
-        if ( regs->eflags & X86_EFLAGS_TF )
+        if ( regs->_eflags & X86_EFLAGS_TF )
         {
             __restore_debug_registers(vmcb, curr);
             vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | 0x4000);
@@ -1635,18 +1635,18 @@ static void svm_vmexit_do_cpuid(struct c
     if ( (inst_len = __get_instruction_length(current, INSTR_CPUID)) == 0 )
         return;
 
-    eax = regs->eax;
-    ebx = regs->ebx;
-    ecx = regs->ecx;
-    edx = regs->edx;
+    eax = regs->_eax;
+    ebx = regs->_ebx;
+    ecx = regs->_ecx;
+    edx = regs->_edx;
 
     hvm_cpuid(regs->_eax, &eax, &ebx, &ecx, &edx);
     HVMTRACE_5D(CPUID, regs->_eax, eax, ebx, ecx, edx);
 
-    regs->eax = eax;
-    regs->ebx = ebx;
-    regs->ecx = ecx;
-    regs->edx = edx;
+    regs->rax = eax;
+    regs->rbx = ebx;
+    regs->rcx = ecx;
+    regs->rdx = edx;
 
     __update_guest_eip(regs, inst_len);
 }
@@ -2011,7 +2011,7 @@ static void svm_vmexit_do_hlt(struct vmc
         return;
     __update_guest_eip(regs, inst_len);
 
-    hvm_hlt(regs->eflags);
+    hvm_hlt(regs->_eflags);
 }
 
 static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs)
@@ -2332,13 +2332,11 @@ void svm_vmexit_handler(struct cpu_user_
     if ( hvm_long_mode_enabled(v) )
         HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
                     1/*cycles*/, 3, exit_reason,
-                    (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
-                    0, 0, 0);
+                    regs->_eip, regs->rip >> 32, 0, 0, 0);
     else
         HVMTRACE_ND(VMEXIT, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
                     1/*cycles*/, 2, exit_reason,
-                    (uint32_t)regs->eip,
-                    0, 0, 0, 0);
+                    regs->_eip, 0, 0, 0, 0);
 
     if ( vcpu_guestmode ) {
         enum nestedhvm_vmexits nsret;
@@ -2476,9 +2474,8 @@ void svm_vmexit_handler(struct cpu_user_
         regs->error_code = vmcb->exitinfo1;
         HVM_DBG_LOG(DBG_LEVEL_VMMU,
                     "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
-                    (unsigned long)regs->eax, (unsigned long)regs->ebx,
-                    (unsigned long)regs->ecx, (unsigned long)regs->edx,
-                    (unsigned long)regs->esi, (unsigned long)regs->edi);
+                    regs->rax, regs->rbx, regs->rcx,
+                    regs->rdx, regs->rsi, regs->rdi);
 
         if ( cpu_has_svm_decode )
             v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
@@ -2616,7 +2613,7 @@ void svm_vmexit_handler(struct cpu_user_
     case VMEXIT_INVLPGA:
         if ( (inst_len = __get_instruction_length(v, INSTR_INVLPGA)) == 0 )
             break;
-        svm_invlpga_intercept(v, regs->eax, regs->ecx);
+        svm_invlpga_intercept(v, regs->rax, regs->_ecx);
         __update_guest_eip(regs, inst_len);
         break;
 
@@ -2624,7 +2621,7 @@ void svm_vmexit_handler(struct cpu_user_
         if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
             break;
         BUG_ON(vcpu_guestmode);
-        HVMTRACE_1D(VMMCALL, regs->eax);
+        HVMTRACE_1D(VMMCALL, regs->_eax);
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
         {
@@ -2648,7 +2645,7 @@ void svm_vmexit_handler(struct cpu_user_
         break;
 
     case VMEXIT_RDTSCP:
-        regs->ecx = hvm_msr_tsc_aux(v);
+        regs->rcx = hvm_msr_tsc_aux(v);
         /* fall through */
     case VMEXIT_RDTSC:
         svm_vmexit_do_rdtsc(regs);
@@ -2660,13 +2657,13 @@ void svm_vmexit_handler(struct cpu_user_
         break;
 
     case VMEXIT_VMRUN:
-        svm_vmexit_do_vmrun(regs, v, regs->eax);
+        svm_vmexit_do_vmrun(regs, v, regs->rax);
         break;
     case VMEXIT_VMLOAD:
-        svm_vmexit_do_vmload(vmcb, regs, v, regs->eax);
+        svm_vmexit_do_vmload(vmcb, regs, v, regs->rax);
         break;
     case VMEXIT_VMSAVE:
-        svm_vmexit_do_vmsave(vmcb, regs, v, regs->eax);
+        svm_vmexit_do_vmsave(vmcb, regs, v, regs->rax);
         break;
     case VMEXIT_STGI:
         svm_vmexit_do_stgi(regs, v);



[-- Attachment #2: x86-regnames-SVM.patch --]
[-- Type: text/plain, Size: 7001 bytes --]

x86/SVM: use unambiguous register names

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags, etc.). Use the
guaranteed 32-bit underscore-prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -322,10 +322,10 @@ static int nsvm_vcpu_hostrestore(struct
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
 
-    regs->eax = n1vmcb->rax;
-    regs->esp = n1vmcb->rsp;
-    regs->eip = n1vmcb->rip;
-    regs->eflags = n1vmcb->rflags;
+    regs->rax = n1vmcb->rax;
+    regs->rsp = n1vmcb->rsp;
+    regs->rip = n1vmcb->rip;
+    regs->rflags = n1vmcb->rflags;
     n1vmcb->_dr7 = 0; /* disable all breakpoints */
     n1vmcb->_cpl = 0;
 
@@ -653,10 +653,10 @@ static int nsvm_vmcb_prepare4vmrun(struc
     }
 
     /* Switch guest registers to l2 guest */
-    regs->eax = ns_vmcb->rax;
-    regs->eip = ns_vmcb->rip;
-    regs->esp = ns_vmcb->rsp;
-    regs->eflags = ns_vmcb->rflags;
+    regs->rax = ns_vmcb->rax;
+    regs->rip = ns_vmcb->rip;
+    regs->rsp = ns_vmcb->rsp;
+    regs->rflags = ns_vmcb->rflags;
 
 #undef vcleanbit_set
     return 0;
@@ -975,7 +975,7 @@ nsvm_vmcb_guest_intercepts_exitcode(stru
             break;
         ns_vmcb = nv->nv_vvmcx;
         vmexits = nsvm_vmcb_guest_intercepts_msr(svm->ns_cached_msrpm,
-            regs->ecx, ns_vmcb->exitinfo1 != 0);
+            regs->_ecx, ns_vmcb->exitinfo1 != 0);
         if (vmexits == NESTEDHVM_VMEXIT_HOST)
             return 0;
         break;
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -110,12 +110,12 @@ void __update_guest_eip(struct cpu_user_
 
     ASSERT(regs == guest_cpu_user_regs());
 
-    regs->eip += inst_len;
-    regs->eflags &= ~X86_EFLAGS_RF;
+    regs->rip += inst_len;
+    regs->_eflags &= ~X86_EFLAGS_RF;
 
     curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
 
-    if ( regs->eflags & X86_EFLAGS_TF )
+    if ( regs->_eflags & X86_EFLAGS_TF )
         hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
 }
 
@@ -520,7 +520,7 @@ static int svm_guest_x86_mode(struct vcp
 
     if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
         return 0;
-    if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
+    if ( unlikely(guest_cpu_user_regs()->_eflags & X86_EFLAGS_VM) )
         return 1;
     if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
         return 8;
@@ -1226,7 +1226,7 @@ static void svm_inject_event(const struc
     switch ( _event.vector )
     {
     case TRAP_debug:
-        if ( regs->eflags & X86_EFLAGS_TF )
+        if ( regs->_eflags & X86_EFLAGS_TF )
         {
             __restore_debug_registers(vmcb, curr);
             vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | 0x4000);
@@ -1635,18 +1635,18 @@ static void svm_vmexit_do_cpuid(struct c
     if ( (inst_len = __get_instruction_length(current, INSTR_CPUID)) == 0 )
         return;
 
-    eax = regs->eax;
-    ebx = regs->ebx;
-    ecx = regs->ecx;
-    edx = regs->edx;
+    eax = regs->_eax;
+    ebx = regs->_ebx;
+    ecx = regs->_ecx;
+    edx = regs->_edx;
 
     hvm_cpuid(regs->_eax, &eax, &ebx, &ecx, &edx);
     HVMTRACE_5D(CPUID, regs->_eax, eax, ebx, ecx, edx);
 
-    regs->eax = eax;
-    regs->ebx = ebx;
-    regs->ecx = ecx;
-    regs->edx = edx;
+    regs->rax = eax;
+    regs->rbx = ebx;
+    regs->rcx = ecx;
+    regs->rdx = edx;
 
     __update_guest_eip(regs, inst_len);
 }
@@ -2011,7 +2011,7 @@ static void svm_vmexit_do_hlt(struct vmc
         return;
     __update_guest_eip(regs, inst_len);
 
-    hvm_hlt(regs->eflags);
+    hvm_hlt(regs->_eflags);
 }
 
 static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs)
@@ -2332,13 +2332,11 @@ void svm_vmexit_handler(struct cpu_user_
     if ( hvm_long_mode_enabled(v) )
         HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
                     1/*cycles*/, 3, exit_reason,
-                    (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
-                    0, 0, 0);
+                    regs->_eip, regs->rip >> 32, 0, 0, 0);
     else
         HVMTRACE_ND(VMEXIT, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
                     1/*cycles*/, 2, exit_reason,
-                    (uint32_t)regs->eip,
-                    0, 0, 0, 0);
+                    regs->_eip, 0, 0, 0, 0);
 
     if ( vcpu_guestmode ) {
         enum nestedhvm_vmexits nsret;
@@ -2476,9 +2474,8 @@ void svm_vmexit_handler(struct cpu_user_
         regs->error_code = vmcb->exitinfo1;
         HVM_DBG_LOG(DBG_LEVEL_VMMU,
                     "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
-                    (unsigned long)regs->eax, (unsigned long)regs->ebx,
-                    (unsigned long)regs->ecx, (unsigned long)regs->edx,
-                    (unsigned long)regs->esi, (unsigned long)regs->edi);
+                    regs->rax, regs->rbx, regs->rcx,
+                    regs->rdx, regs->rsi, regs->rdi);
 
         if ( cpu_has_svm_decode )
             v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
@@ -2616,7 +2613,7 @@ void svm_vmexit_handler(struct cpu_user_
     case VMEXIT_INVLPGA:
         if ( (inst_len = __get_instruction_length(v, INSTR_INVLPGA)) == 0 )
             break;
-        svm_invlpga_intercept(v, regs->eax, regs->ecx);
+        svm_invlpga_intercept(v, regs->rax, regs->_ecx);
         __update_guest_eip(regs, inst_len);
         break;
 
@@ -2624,7 +2621,7 @@ void svm_vmexit_handler(struct cpu_user_
         if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
             break;
         BUG_ON(vcpu_guestmode);
-        HVMTRACE_1D(VMMCALL, regs->eax);
+        HVMTRACE_1D(VMMCALL, regs->_eax);
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
         {
@@ -2648,7 +2645,7 @@ void svm_vmexit_handler(struct cpu_user_
         break;
 
     case VMEXIT_RDTSCP:
-        regs->ecx = hvm_msr_tsc_aux(v);
+        regs->rcx = hvm_msr_tsc_aux(v);
         /* fall through */
     case VMEXIT_RDTSC:
         svm_vmexit_do_rdtsc(regs);
@@ -2660,13 +2657,13 @@ void svm_vmexit_handler(struct cpu_user_
         break;
 
     case VMEXIT_VMRUN:
-        svm_vmexit_do_vmrun(regs, v, regs->eax);
+        svm_vmexit_do_vmrun(regs, v, regs->rax);
         break;
     case VMEXIT_VMLOAD:
-        svm_vmexit_do_vmload(vmcb, regs, v, regs->eax);
+        svm_vmexit_do_vmload(vmcb, regs, v, regs->rax);
         break;
     case VMEXIT_VMSAVE:
-        svm_vmexit_do_vmsave(vmcb, regs, v, regs->eax);
+        svm_vmexit_do_vmsave(vmcb, regs, v, regs->rax);
         break;
     case VMEXIT_STGI:
         svm_vmexit_do_stgi(regs, v);

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 08/10] x86/vm-event: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
                   ` (6 preceding siblings ...)
  2016-12-20 10:41 ` [PATCH 07/10] x86/SVM: " Jan Beulich
@ 2016-12-20 10:42 ` Jan Beulich
  2016-12-20 17:30   ` Tamas K Lengyel
  2016-12-22 16:08   ` Razvan Cojocaru
  2016-12-20 10:42 ` [PATCH 09/10] x86/traps: " Jan Beulich
                   ` (2 subsequent siblings)
  10 siblings, 2 replies; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:42 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper, tamas, Razvan Cojocaru

[-- Attachment #1: Type: text/plain, Size: 3158 bytes --]

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags, etc.).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -112,14 +112,14 @@ void vm_event_set_registers(struct vcpu
 {
     ASSERT(atomic_read(&v->vm_event_pause_count));
 
-    v->arch.user_regs.eax = rsp->data.regs.x86.rax;
-    v->arch.user_regs.ebx = rsp->data.regs.x86.rbx;
-    v->arch.user_regs.ecx = rsp->data.regs.x86.rcx;
-    v->arch.user_regs.edx = rsp->data.regs.x86.rdx;
-    v->arch.user_regs.esp = rsp->data.regs.x86.rsp;
-    v->arch.user_regs.ebp = rsp->data.regs.x86.rbp;
-    v->arch.user_regs.esi = rsp->data.regs.x86.rsi;
-    v->arch.user_regs.edi = rsp->data.regs.x86.rdi;
+    v->arch.user_regs.rax = rsp->data.regs.x86.rax;
+    v->arch.user_regs.rbx = rsp->data.regs.x86.rbx;
+    v->arch.user_regs.rcx = rsp->data.regs.x86.rcx;
+    v->arch.user_regs.rdx = rsp->data.regs.x86.rdx;
+    v->arch.user_regs.rsp = rsp->data.regs.x86.rsp;
+    v->arch.user_regs.rbp = rsp->data.regs.x86.rbp;
+    v->arch.user_regs.rsi = rsp->data.regs.x86.rsi;
+    v->arch.user_regs.rdi = rsp->data.regs.x86.rdi;
 
     v->arch.user_regs.r8 = rsp->data.regs.x86.r8;
     v->arch.user_regs.r9 = rsp->data.regs.x86.r9;
@@ -130,8 +130,8 @@ void vm_event_set_registers(struct vcpu
     v->arch.user_regs.r14 = rsp->data.regs.x86.r14;
     v->arch.user_regs.r15 = rsp->data.regs.x86.r15;
 
-    v->arch.user_regs.eflags = rsp->data.regs.x86.rflags;
-    v->arch.user_regs.eip = rsp->data.regs.x86.rip;
+    v->arch.user_regs.rflags = rsp->data.regs.x86.rflags;
+    v->arch.user_regs.rip = rsp->data.regs.x86.rip;
 }
 
 void vm_event_monitor_next_interrupt(struct vcpu *v)
@@ -151,14 +151,14 @@ void vm_event_fill_regs(vm_event_request
     /* Architecture-specific vmcs/vmcb bits */
     hvm_funcs.save_cpu_ctxt(curr, &ctxt);
 
-    req->data.regs.x86.rax = regs->eax;
-    req->data.regs.x86.rcx = regs->ecx;
-    req->data.regs.x86.rdx = regs->edx;
-    req->data.regs.x86.rbx = regs->ebx;
-    req->data.regs.x86.rsp = regs->esp;
-    req->data.regs.x86.rbp = regs->ebp;
-    req->data.regs.x86.rsi = regs->esi;
-    req->data.regs.x86.rdi = regs->edi;
+    req->data.regs.x86.rax = regs->rax;
+    req->data.regs.x86.rcx = regs->rcx;
+    req->data.regs.x86.rdx = regs->rdx;
+    req->data.regs.x86.rbx = regs->rbx;
+    req->data.regs.x86.rsp = regs->rsp;
+    req->data.regs.x86.rbp = regs->rbp;
+    req->data.regs.x86.rsi = regs->rsi;
+    req->data.regs.x86.rdi = regs->rdi;
 
     req->data.regs.x86.r8  = regs->r8;
     req->data.regs.x86.r9  = regs->r9;
@@ -169,8 +169,8 @@ void vm_event_fill_regs(vm_event_request
     req->data.regs.x86.r14 = regs->r14;
     req->data.regs.x86.r15 = regs->r15;
 
-    req->data.regs.x86.rflags = regs->eflags;
-    req->data.regs.x86.rip    = regs->eip;
+    req->data.regs.x86.rflags = regs->rflags;
+    req->data.regs.x86.rip    = regs->rip;
 
     req->data.regs.x86.dr7 = curr->arch.debugreg[7];
     req->data.regs.x86.cr0 = ctxt.cr0;




[-- Attachment #2: x86-regnames-vmevt.patch --]
[-- Type: text/plain, Size: 3200 bytes --]

x86/vm-event: use unambiguous register names

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags, etc.).

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -112,14 +112,14 @@ void vm_event_set_registers(struct vcpu
 {
     ASSERT(atomic_read(&v->vm_event_pause_count));
 
-    v->arch.user_regs.eax = rsp->data.regs.x86.rax;
-    v->arch.user_regs.ebx = rsp->data.regs.x86.rbx;
-    v->arch.user_regs.ecx = rsp->data.regs.x86.rcx;
-    v->arch.user_regs.edx = rsp->data.regs.x86.rdx;
-    v->arch.user_regs.esp = rsp->data.regs.x86.rsp;
-    v->arch.user_regs.ebp = rsp->data.regs.x86.rbp;
-    v->arch.user_regs.esi = rsp->data.regs.x86.rsi;
-    v->arch.user_regs.edi = rsp->data.regs.x86.rdi;
+    v->arch.user_regs.rax = rsp->data.regs.x86.rax;
+    v->arch.user_regs.rbx = rsp->data.regs.x86.rbx;
+    v->arch.user_regs.rcx = rsp->data.regs.x86.rcx;
+    v->arch.user_regs.rdx = rsp->data.regs.x86.rdx;
+    v->arch.user_regs.rsp = rsp->data.regs.x86.rsp;
+    v->arch.user_regs.rbp = rsp->data.regs.x86.rbp;
+    v->arch.user_regs.rsi = rsp->data.regs.x86.rsi;
+    v->arch.user_regs.rdi = rsp->data.regs.x86.rdi;
 
     v->arch.user_regs.r8 = rsp->data.regs.x86.r8;
     v->arch.user_regs.r9 = rsp->data.regs.x86.r9;
@@ -130,8 +130,8 @@ void vm_event_set_registers(struct vcpu
     v->arch.user_regs.r14 = rsp->data.regs.x86.r14;
     v->arch.user_regs.r15 = rsp->data.regs.x86.r15;
 
-    v->arch.user_regs.eflags = rsp->data.regs.x86.rflags;
-    v->arch.user_regs.eip = rsp->data.regs.x86.rip;
+    v->arch.user_regs.rflags = rsp->data.regs.x86.rflags;
+    v->arch.user_regs.rip = rsp->data.regs.x86.rip;
 }
 
 void vm_event_monitor_next_interrupt(struct vcpu *v)
@@ -151,14 +151,14 @@ void vm_event_fill_regs(vm_event_request
     /* Architecture-specific vmcs/vmcb bits */
     hvm_funcs.save_cpu_ctxt(curr, &ctxt);
 
-    req->data.regs.x86.rax = regs->eax;
-    req->data.regs.x86.rcx = regs->ecx;
-    req->data.regs.x86.rdx = regs->edx;
-    req->data.regs.x86.rbx = regs->ebx;
-    req->data.regs.x86.rsp = regs->esp;
-    req->data.regs.x86.rbp = regs->ebp;
-    req->data.regs.x86.rsi = regs->esi;
-    req->data.regs.x86.rdi = regs->edi;
+    req->data.regs.x86.rax = regs->rax;
+    req->data.regs.x86.rcx = regs->rcx;
+    req->data.regs.x86.rdx = regs->rdx;
+    req->data.regs.x86.rbx = regs->rbx;
+    req->data.regs.x86.rsp = regs->rsp;
+    req->data.regs.x86.rbp = regs->rbp;
+    req->data.regs.x86.rsi = regs->rsi;
+    req->data.regs.x86.rdi = regs->rdi;
 
     req->data.regs.x86.r8  = regs->r8;
     req->data.regs.x86.r9  = regs->r9;
@@ -169,8 +169,8 @@ void vm_event_fill_regs(vm_event_request
     req->data.regs.x86.r14 = regs->r14;
     req->data.regs.x86.r15 = regs->r15;
 
-    req->data.regs.x86.rflags = regs->eflags;
-    req->data.regs.x86.rip    = regs->eip;
+    req->data.regs.x86.rflags = regs->rflags;
+    req->data.regs.x86.rip    = regs->rip;
 
     req->data.regs.x86.dr7 = curr->arch.debugreg[7];
     req->data.regs.x86.cr0 = ctxt.cr0;

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 09/10] x86/traps: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
                   ` (7 preceding siblings ...)
  2016-12-20 10:42 ` [PATCH 08/10] x86/vm-event: " Jan Beulich
@ 2016-12-20 10:42 ` Jan Beulich
  2016-12-20 10:43 ` [PATCH 10/10] x86/misc: " Jan Beulich
  2016-12-20 17:34 ` [PATCH 00/10] x86: register renaming (part I) Andrew Cooper
  10 siblings, 0 replies; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:42 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

[-- Attachment #1: Type: text/plain, Size: 13668 bytes --]

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags, etc.). Use the
guaranteed 32-bit underscore-prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -202,7 +202,7 @@ static void show_guest_stack(struct vcpu
         return;
     }
 
-    stack = (unsigned long *)regs->esp;
+    stack = (unsigned long *)regs->rsp;
     printk("Guest stack trace from "__OP"sp=%p:\n  ", stack);
 
     if ( !access_ok(stack, sizeof(*stack)) )
@@ -367,8 +367,8 @@ static void _show_trace(unsigned long sp
                 break;
             frame = (unsigned long *)next;
             next  = frame[0];
-            addr  = frame[(offsetof(struct cpu_user_regs, eip) -
-                           offsetof(struct cpu_user_regs, ebp))
+            addr  = frame[(offsetof(struct cpu_user_regs, rip) -
+                           offsetof(struct cpu_user_regs, rbp))
                          / BYTES_PER_LONG];
         }
         else
@@ -623,7 +623,7 @@ void fatal_trap(const struct cpu_user_re
     panic("FATAL TRAP: vector = %d (%s)\n"
           "[error_code=%04x] %s",
           trapnr, trapstr(trapnr), regs->error_code,
-          (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
+          (regs->_eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
 }
 
 void pv_inject_event(const struct x86_event *event)
@@ -663,7 +663,7 @@ void pv_inject_event(const struct x86_ev
         trace_pv_page_fault(event->cr2, error_code);
     }
     else
-        trace_pv_trap(vector, regs->eip, use_error_code, error_code);
+        trace_pv_trap(vector, regs->rip, use_error_code, error_code);
 
     if ( use_error_code )
     {
@@ -697,11 +697,11 @@ static inline void do_guest_trap(unsigne
     pv_inject_event(&event);
 }
 
-static void instruction_done(struct cpu_user_regs *regs, unsigned long eip)
+static void instruction_done(struct cpu_user_regs *regs, unsigned long rip)
 {
-    regs->eip = eip;
-    regs->eflags &= ~X86_EFLAGS_RF;
-    if ( regs->eflags & X86_EFLAGS_TF )
+    regs->rip = rip;
+    regs->_eflags &= ~X86_EFLAGS_RF;
+    if ( regs->_eflags & X86_EFLAGS_TF )
     {
         current->arch.debugreg[6] |= DR_STEP | DR_STATUS_RESERVED_ONE;
         do_guest_trap(TRAP_debug, regs);
@@ -799,12 +799,12 @@ void do_trap(struct cpu_user_regs *regs)
         return;
     }
 
-    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
+    if ( likely((fixup = search_exception_table(regs->rip)) != 0) )
     {
         dprintk(XENLOG_ERR, "Trap %d: %p -> %p\n",
-                trapnr, _p(regs->eip), _p(fixup));
-        this_cpu(last_extable_addr) = regs->eip;
-        regs->eip = fixup;
+                trapnr, _p(regs->rip), _p(fixup));
+        this_cpu(last_extable_addr) = regs->rip;
+        regs->rip = fixup;
         return;
     }
 
@@ -1042,10 +1042,10 @@ void pv_cpuid(struct cpu_user_regs *regs
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
 
-    leaf = a = regs->eax;
-    b = regs->ebx;
-    subleaf = c = regs->ecx;
-    d = regs->edx;
+    leaf = a = regs->_eax;
+    b = regs->_ebx;
+    subleaf = c = regs->_ecx;
+    d = regs->_edx;
 
     if ( cpuid_hypervisor_leaves(leaf, subleaf, &a, &b, &c, &d) )
         goto out;
@@ -1065,10 +1065,10 @@ void pv_cpuid(struct cpu_user_regs *regs
             limit = cpuid_eax(limit);
         if ( leaf > limit )
         {
-            regs->eax = 0;
-            regs->ebx = 0;
-            regs->ecx = 0;
-            regs->edx = 0;
+            regs->rax = 0;
+            regs->rbx = 0;
+            regs->rcx = 0;
+            regs->rdx = 0;
             return;
         }
     }
@@ -1382,10 +1382,10 @@ void pv_cpuid(struct cpu_user_regs *regs
     }
 
  out:
-    regs->eax = a;
-    regs->ebx = b;
-    regs->ecx = c;
-    regs->edx = d;
+    regs->rax = a;
+    regs->rbx = b;
+    regs->rcx = c;
+    regs->rdx = d;
 }
 
 static int emulate_invalid_rdtscp(struct cpu_user_regs *regs)
@@ -1394,7 +1394,7 @@ static int emulate_invalid_rdtscp(struct
     unsigned long eip, rc;
     struct vcpu *v = current;
 
-    eip = regs->eip;
+    eip = regs->rip;
     if ( (rc = copy_from_user(opcode, (char *)eip, sizeof(opcode))) != 0 )
     {
         pv_inject_page_fault(0, eip + sizeof(opcode) - rc);
@@ -1413,7 +1413,7 @@ static int emulate_forced_invalid_op(str
     char sig[5], instr[2];
     unsigned long eip, rc;
 
-    eip = regs->eip;
+    eip = regs->rip;
 
     /* Check for forced emulation signature: ud2 ; .ascii "xen". */
     if ( (rc = copy_from_user(sig, (char *)eip, sizeof(sig))) != 0 )
@@ -1437,7 +1437,7 @@ static int emulate_forced_invalid_op(str
     /* If cpuid faulting is enabled and CPL>0 inject a #GP in place of #UD. */
     if ( current->arch.cpuid_faulting && !guest_kernel_mode(current, regs) )
     {
-        regs->eip = eip;
+        regs->rip = eip;
         do_guest_trap(TRAP_gp_fault, regs);
         return EXCRET_fault_fixed;
     }
@@ -1448,7 +1448,7 @@ static int emulate_forced_invalid_op(str
 
     instruction_done(regs, eip);
 
-    trace_trap_one_addr(TRC_PV_FORCED_INVALID_OP, regs->eip);
+    trace_trap_one_addr(TRC_PV_FORCED_INVALID_OP, regs->rip);
 
     return EXCRET_fault_fixed;
 }
@@ -1457,7 +1457,7 @@ void do_invalid_op(struct cpu_user_regs
 {
     const struct bug_frame *bug = NULL;
     u8 bug_insn[2];
-    const char *prefix = "", *filename, *predicate, *eip = (char *)regs->eip;
+    const char *prefix = "", *filename, *predicate, *eip = (char *)regs->rip;
     unsigned long fixup;
     int id = -1, lineno;
     const struct virtual_region *region;
@@ -1473,12 +1473,12 @@ void do_invalid_op(struct cpu_user_regs
         return;
     }
 
-    if ( !is_active_kernel_text(regs->eip) ||
+    if ( !is_active_kernel_text(regs->rip) ||
          __copy_from_user(bug_insn, eip, sizeof(bug_insn)) ||
          memcmp(bug_insn, "\xf\xb", sizeof(bug_insn)) )
         goto die;
 
-    region = find_text_region(regs->eip);
+    region = find_text_region(regs->rip);
     if ( region )
     {
         for ( id = 0; id < BUGFRAME_NR; id++ )
@@ -1507,7 +1507,7 @@ void do_invalid_op(struct cpu_user_regs
         void (*fn)(struct cpu_user_regs *) = bug_ptr(bug);
 
         fn(regs);
-        regs->eip = (unsigned long)eip;
+        regs->rip = (unsigned long)eip;
         return;
     }
 
@@ -1528,7 +1528,7 @@ void do_invalid_op(struct cpu_user_regs
     case BUGFRAME_warn:
         printk("Xen WARN at %s%s:%d\n", prefix, filename, lineno);
         show_execution_state(regs);
-        regs->eip = (unsigned long)eip;
+        regs->rip = (unsigned long)eip;
         return;
 
     case BUGFRAME_bug:
@@ -1558,10 +1558,10 @@ void do_invalid_op(struct cpu_user_regs
     }
 
  die:
-    if ( (fixup = search_exception_table(regs->eip)) != 0 )
+    if ( (fixup = search_exception_table(regs->rip)) != 0 )
     {
-        this_cpu(last_extable_addr) = regs->eip;
-        regs->eip = fixup;
+        this_cpu(last_extable_addr) = regs->rip;
+        regs->rip = fixup;
         return;
     }
 
@@ -1622,7 +1622,7 @@ static int handle_gdt_ldt_mapping_fault(
         {
             if ( guest_mode(regs) )
                 trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
-                                    regs->eip, offset);
+                                    regs->rip, offset);
         }
         else
         {
@@ -1764,7 +1764,7 @@ leaf:
          *   - Page fault in kernel mode
          */
         if ( (cr4 & X86_CR4_SMAP) && !(error_code & PFEC_user_mode) &&
-             (((regs->cs & 3) == 3) || !(regs->eflags & X86_EFLAGS_AC)) )
+             (((regs->cs & 3) == 3) || !(regs->_eflags & X86_EFLAGS_AC)) )
             return smap_fault;
     }
 
@@ -1794,7 +1794,7 @@ static int fixup_page_fault(unsigned lon
     struct domain *d = v->domain;
 
     /* No fixups in interrupt context or when interrupts are disabled. */
-    if ( in_irq() || !(regs->eflags & X86_EFLAGS_IF) )
+    if ( in_irq() || !(regs->_eflags & X86_EFLAGS_IF) )
         return 0;
 
     if ( !(regs->error_code & PFEC_page_present) &&
@@ -1841,7 +1841,7 @@ static int fixup_page_fault(unsigned lon
 
         ret = paging_fault(addr, regs);
         if ( ret == EXCRET_fault_fixed )
-            trace_trap_two_addr(TRC_PV_PAGING_FIXUP, regs->eip, addr);
+            trace_trap_two_addr(TRC_PV_PAGING_FIXUP, regs->rip, addr);
         return ret;
     }
 
@@ -1888,13 +1888,13 @@ void do_page_fault(struct cpu_user_regs
         if ( pf_type != real_fault )
             return;
 
-        if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
+        if ( likely((fixup = search_exception_table(regs->rip)) != 0) )
         {
             perfc_incr(copy_user_faults);
             if ( unlikely(regs->error_code & PFEC_reserved_bit) )
                 reserved_bit_page_fault(addr, regs);
-            this_cpu(last_extable_addr) = regs->eip;
-            regs->eip = fixup;
+            this_cpu(last_extable_addr) = regs->rip;
+            regs->rip = fixup;
             return;
         }
 
@@ -1944,9 +1944,9 @@ void __init do_early_page_fault(struct c
 
     BUG_ON(smp_processor_id() != 0);
 
-    if ( (regs->eip != prev_eip) || (cr2 != prev_cr2) )
+    if ( (regs->rip != prev_eip) || (cr2 != prev_cr2) )
     {
-        prev_eip = regs->eip;
+        prev_eip = regs->rip;
         prev_cr2 = cr2;
         stuck    = 0;
         return;
@@ -1956,7 +1956,7 @@ void __init do_early_page_fault(struct c
     {
         console_start_sync();
         printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n",
-               regs->cs, _p(regs->eip), _p(cr2), regs->error_code);
+               regs->cs, _p(regs->rip), _p(cr2), regs->error_code);
         fatal_trap(regs, 0);
     }
 }
@@ -3699,7 +3699,7 @@ static void emulate_gate_op(struct cpu_u
                 return;
             }
             push(regs->ss);
-            push(regs->esp);
+            push(regs->rsp);
             if ( nparm )
             {
                 const unsigned int *ustkp;
@@ -3735,7 +3735,7 @@ static void emulate_gate_op(struct cpu_u
         else
         {
             sel |= (regs->cs & 3);
-            esp = regs->esp;
+            esp = regs->rsp;
             ss = regs->ss;
             if ( !read_descriptor(ss, v, &base, &limit, &ar, 0) ||
                  ((ar >> 13) & 3) != (sel & 3) )
@@ -3756,9 +3756,9 @@ static void emulate_gate_op(struct cpu_u
             }
         }
         push(regs->cs);
-        push(regs->eip + insn_len);
+        push(regs->rip + insn_len);
 #undef push
-        regs->esp = esp;
+        regs->rsp = esp;
         regs->ss = ss;
     }
     else
@@ -3811,7 +3811,7 @@ void do_general_protection(struct cpu_us
         ti = &v->arch.pv_vcpu.trap_ctxt[vector];
         if ( permit_softint(TI_GET_DPL(ti), v, regs) )
         {
-            regs->eip += 2;
+            regs->rip += 2;
             do_guest_trap(vector, regs);
             return;
         }
@@ -3826,7 +3826,7 @@ void do_general_protection(struct cpu_us
     if ( (regs->error_code == 0) &&
          emulate_privileged_op(regs) )
     {
-        trace_trap_one_addr(TRC_PV_EMULATE_PRIVOP, regs->eip);
+        trace_trap_one_addr(TRC_PV_EMULATE_PRIVOP, regs->rip);
         return;
     }
 
@@ -3836,12 +3836,12 @@ void do_general_protection(struct cpu_us
 
  gp_in_kernel:
 
-    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
+    if ( likely((fixup = search_exception_table(regs->rip)) != 0) )
     {
         dprintk(XENLOG_INFO, "GPF (%04x): %p -> %p\n",
-                regs->error_code, _p(regs->eip), _p(fixup));
-        this_cpu(last_extable_addr) = regs->eip;
-        regs->eip = fixup;
+                regs->error_code, _p(regs->rip), _p(fixup));
+        this_cpu(last_extable_addr) = regs->rip;
+        regs->rip = fixup;
         return;
     }
 
@@ -4091,20 +4091,20 @@ void do_debug(struct cpu_user_regs *regs
 
     if ( !guest_mode(regs) )
     {
-        if ( regs->eflags & X86_EFLAGS_TF )
+        if ( regs->_eflags & X86_EFLAGS_TF )
         {
             /* In SYSENTER entry path we can't zap TF until EFLAGS is saved. */
             if ( (regs->rip >= (unsigned long)sysenter_entry) &&
                  (regs->rip <= (unsigned long)sysenter_eflags_saved) )
             {
                 if ( regs->rip == (unsigned long)sysenter_eflags_saved )
-                    regs->eflags &= ~X86_EFLAGS_TF;
+                    regs->_eflags &= ~X86_EFLAGS_TF;
                 goto out;
             }
             if ( !debugger_trap_fatal(TRAP_debug, regs) )
             {
                 WARN();
-                regs->eflags &= ~X86_EFLAGS_TF;
+                regs->_eflags &= ~X86_EFLAGS_TF;
             }
         }
         else
@@ -4115,7 +4115,7 @@ void do_debug(struct cpu_user_regs *regs
              * watchpoint set on it. No need to bump EIP; the only faulting
              * trap is an instruction breakpoint, which can't happen to us.
              */
-            WARN_ON(!search_exception_table(regs->eip));
+            WARN_ON(!search_exception_table(regs->rip));
         }
         goto out;
     }



[-- Attachment #2: x86-regnames-traps.patch --]
[-- Type: text/plain, Size: 13709 bytes --]

x86/traps: use unambiguous register names

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags, etc.). Use the
guaranteed 32-bit underscore-prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -202,7 +202,7 @@ static void show_guest_stack(struct vcpu
         return;
     }
 
-    stack = (unsigned long *)regs->esp;
+    stack = (unsigned long *)regs->rsp;
     printk("Guest stack trace from "__OP"sp=%p:\n  ", stack);
 
     if ( !access_ok(stack, sizeof(*stack)) )
@@ -367,8 +367,8 @@ static void _show_trace(unsigned long sp
                 break;
             frame = (unsigned long *)next;
             next  = frame[0];
-            addr  = frame[(offsetof(struct cpu_user_regs, eip) -
-                           offsetof(struct cpu_user_regs, ebp))
+            addr  = frame[(offsetof(struct cpu_user_regs, rip) -
+                           offsetof(struct cpu_user_regs, rbp))
                          / BYTES_PER_LONG];
         }
         else
@@ -623,7 +623,7 @@ void fatal_trap(const struct cpu_user_re
     panic("FATAL TRAP: vector = %d (%s)\n"
           "[error_code=%04x] %s",
           trapnr, trapstr(trapnr), regs->error_code,
-          (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
+          (regs->_eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
 }
 
 void pv_inject_event(const struct x86_event *event)
@@ -663,7 +663,7 @@ void pv_inject_event(const struct x86_ev
         trace_pv_page_fault(event->cr2, error_code);
     }
     else
-        trace_pv_trap(vector, regs->eip, use_error_code, error_code);
+        trace_pv_trap(vector, regs->rip, use_error_code, error_code);
 
     if ( use_error_code )
     {
@@ -697,11 +697,11 @@ static inline void do_guest_trap(unsigne
     pv_inject_event(&event);
 }
 
-static void instruction_done(struct cpu_user_regs *regs, unsigned long eip)
+static void instruction_done(struct cpu_user_regs *regs, unsigned long rip)
 {
-    regs->eip = eip;
-    regs->eflags &= ~X86_EFLAGS_RF;
-    if ( regs->eflags & X86_EFLAGS_TF )
+    regs->rip = rip;
+    regs->_eflags &= ~X86_EFLAGS_RF;
+    if ( regs->_eflags & X86_EFLAGS_TF )
     {
         current->arch.debugreg[6] |= DR_STEP | DR_STATUS_RESERVED_ONE;
         do_guest_trap(TRAP_debug, regs);
@@ -799,12 +799,12 @@ void do_trap(struct cpu_user_regs *regs)
         return;
     }
 
-    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
+    if ( likely((fixup = search_exception_table(regs->rip)) != 0) )
     {
         dprintk(XENLOG_ERR, "Trap %d: %p -> %p\n",
-                trapnr, _p(regs->eip), _p(fixup));
-        this_cpu(last_extable_addr) = regs->eip;
-        regs->eip = fixup;
+                trapnr, _p(regs->rip), _p(fixup));
+        this_cpu(last_extable_addr) = regs->rip;
+        regs->rip = fixup;
         return;
     }
 
@@ -1042,10 +1042,10 @@ void pv_cpuid(struct cpu_user_regs *regs
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
 
-    leaf = a = regs->eax;
-    b = regs->ebx;
-    subleaf = c = regs->ecx;
-    d = regs->edx;
+    leaf = a = regs->_eax;
+    b = regs->_ebx;
+    subleaf = c = regs->_ecx;
+    d = regs->_edx;
 
     if ( cpuid_hypervisor_leaves(leaf, subleaf, &a, &b, &c, &d) )
         goto out;
@@ -1065,10 +1065,10 @@ void pv_cpuid(struct cpu_user_regs *regs
             limit = cpuid_eax(limit);
         if ( leaf > limit )
         {
-            regs->eax = 0;
-            regs->ebx = 0;
-            regs->ecx = 0;
-            regs->edx = 0;
+            regs->rax = 0;
+            regs->rbx = 0;
+            regs->rcx = 0;
+            regs->rdx = 0;
             return;
         }
     }
@@ -1382,10 +1382,10 @@ void pv_cpuid(struct cpu_user_regs *regs
     }
 
  out:
-    regs->eax = a;
-    regs->ebx = b;
-    regs->ecx = c;
-    regs->edx = d;
+    regs->rax = a;
+    regs->rbx = b;
+    regs->rcx = c;
+    regs->rdx = d;
 }
 
 static int emulate_invalid_rdtscp(struct cpu_user_regs *regs)
@@ -1394,7 +1394,7 @@ static int emulate_invalid_rdtscp(struct
     unsigned long eip, rc;
     struct vcpu *v = current;
 
-    eip = regs->eip;
+    eip = regs->rip;
     if ( (rc = copy_from_user(opcode, (char *)eip, sizeof(opcode))) != 0 )
     {
         pv_inject_page_fault(0, eip + sizeof(opcode) - rc);
@@ -1413,7 +1413,7 @@ static int emulate_forced_invalid_op(str
     char sig[5], instr[2];
     unsigned long eip, rc;
 
-    eip = regs->eip;
+    eip = regs->rip;
 
     /* Check for forced emulation signature: ud2 ; .ascii "xen". */
     if ( (rc = copy_from_user(sig, (char *)eip, sizeof(sig))) != 0 )
@@ -1437,7 +1437,7 @@ static int emulate_forced_invalid_op(str
     /* If cpuid faulting is enabled and CPL>0 inject a #GP in place of #UD. */
     if ( current->arch.cpuid_faulting && !guest_kernel_mode(current, regs) )
     {
-        regs->eip = eip;
+        regs->rip = eip;
         do_guest_trap(TRAP_gp_fault, regs);
         return EXCRET_fault_fixed;
     }
@@ -1448,7 +1448,7 @@ static int emulate_forced_invalid_op(str
 
     instruction_done(regs, eip);
 
-    trace_trap_one_addr(TRC_PV_FORCED_INVALID_OP, regs->eip);
+    trace_trap_one_addr(TRC_PV_FORCED_INVALID_OP, regs->rip);
 
     return EXCRET_fault_fixed;
 }
@@ -1457,7 +1457,7 @@ void do_invalid_op(struct cpu_user_regs
 {
     const struct bug_frame *bug = NULL;
     u8 bug_insn[2];
-    const char *prefix = "", *filename, *predicate, *eip = (char *)regs->eip;
+    const char *prefix = "", *filename, *predicate, *eip = (char *)regs->rip;
     unsigned long fixup;
     int id = -1, lineno;
     const struct virtual_region *region;
@@ -1473,12 +1473,12 @@ void do_invalid_op(struct cpu_user_regs
         return;
     }
 
-    if ( !is_active_kernel_text(regs->eip) ||
+    if ( !is_active_kernel_text(regs->rip) ||
          __copy_from_user(bug_insn, eip, sizeof(bug_insn)) ||
          memcmp(bug_insn, "\xf\xb", sizeof(bug_insn)) )
         goto die;
 
-    region = find_text_region(regs->eip);
+    region = find_text_region(regs->rip);
     if ( region )
     {
         for ( id = 0; id < BUGFRAME_NR; id++ )
@@ -1507,7 +1507,7 @@ void do_invalid_op(struct cpu_user_regs
         void (*fn)(struct cpu_user_regs *) = bug_ptr(bug);
 
         fn(regs);
-        regs->eip = (unsigned long)eip;
+        regs->rip = (unsigned long)eip;
         return;
     }
 
@@ -1528,7 +1528,7 @@ void do_invalid_op(struct cpu_user_regs
     case BUGFRAME_warn:
         printk("Xen WARN at %s%s:%d\n", prefix, filename, lineno);
         show_execution_state(regs);
-        regs->eip = (unsigned long)eip;
+        regs->rip = (unsigned long)eip;
         return;
 
     case BUGFRAME_bug:
@@ -1558,10 +1558,10 @@ void do_invalid_op(struct cpu_user_regs
     }
 
  die:
-    if ( (fixup = search_exception_table(regs->eip)) != 0 )
+    if ( (fixup = search_exception_table(regs->rip)) != 0 )
     {
-        this_cpu(last_extable_addr) = regs->eip;
-        regs->eip = fixup;
+        this_cpu(last_extable_addr) = regs->rip;
+        regs->rip = fixup;
         return;
     }
 
@@ -1622,7 +1622,7 @@ static int handle_gdt_ldt_mapping_fault(
         {
             if ( guest_mode(regs) )
                 trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
-                                    regs->eip, offset);
+                                    regs->rip, offset);
         }
         else
         {
@@ -1764,7 +1764,7 @@ leaf:
          *   - Page fault in kernel mode
          */
         if ( (cr4 & X86_CR4_SMAP) && !(error_code & PFEC_user_mode) &&
-             (((regs->cs & 3) == 3) || !(regs->eflags & X86_EFLAGS_AC)) )
+             (((regs->cs & 3) == 3) || !(regs->_eflags & X86_EFLAGS_AC)) )
             return smap_fault;
     }
 
@@ -1794,7 +1794,7 @@ static int fixup_page_fault(unsigned lon
     struct domain *d = v->domain;
 
     /* No fixups in interrupt context or when interrupts are disabled. */
-    if ( in_irq() || !(regs->eflags & X86_EFLAGS_IF) )
+    if ( in_irq() || !(regs->_eflags & X86_EFLAGS_IF) )
         return 0;
 
     if ( !(regs->error_code & PFEC_page_present) &&
@@ -1841,7 +1841,7 @@ static int fixup_page_fault(unsigned lon
 
         ret = paging_fault(addr, regs);
         if ( ret == EXCRET_fault_fixed )
-            trace_trap_two_addr(TRC_PV_PAGING_FIXUP, regs->eip, addr);
+            trace_trap_two_addr(TRC_PV_PAGING_FIXUP, regs->rip, addr);
         return ret;
     }
 
@@ -1888,13 +1888,13 @@ void do_page_fault(struct cpu_user_regs
         if ( pf_type != real_fault )
             return;
 
-        if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
+        if ( likely((fixup = search_exception_table(regs->rip)) != 0) )
         {
             perfc_incr(copy_user_faults);
             if ( unlikely(regs->error_code & PFEC_reserved_bit) )
                 reserved_bit_page_fault(addr, regs);
-            this_cpu(last_extable_addr) = regs->eip;
-            regs->eip = fixup;
+            this_cpu(last_extable_addr) = regs->rip;
+            regs->rip = fixup;
             return;
         }
 
@@ -1944,9 +1944,9 @@ void __init do_early_page_fault(struct c
 
     BUG_ON(smp_processor_id() != 0);
 
-    if ( (regs->eip != prev_eip) || (cr2 != prev_cr2) )
+    if ( (regs->rip != prev_eip) || (cr2 != prev_cr2) )
     {
-        prev_eip = regs->eip;
+        prev_eip = regs->rip;
         prev_cr2 = cr2;
         stuck    = 0;
         return;
@@ -1956,7 +1956,7 @@ void __init do_early_page_fault(struct c
     {
         console_start_sync();
         printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n",
-               regs->cs, _p(regs->eip), _p(cr2), regs->error_code);
+               regs->cs, _p(regs->rip), _p(cr2), regs->error_code);
         fatal_trap(regs, 0);
     }
 }
@@ -3699,7 +3699,7 @@ static void emulate_gate_op(struct cpu_u
                 return;
             }
             push(regs->ss);
-            push(regs->esp);
+            push(regs->rsp);
             if ( nparm )
             {
                 const unsigned int *ustkp;
@@ -3735,7 +3735,7 @@ static void emulate_gate_op(struct cpu_u
         else
         {
             sel |= (regs->cs & 3);
-            esp = regs->esp;
+            esp = regs->rsp;
             ss = regs->ss;
             if ( !read_descriptor(ss, v, &base, &limit, &ar, 0) ||
                  ((ar >> 13) & 3) != (sel & 3) )
@@ -3756,9 +3756,9 @@ static void emulate_gate_op(struct cpu_u
             }
         }
         push(regs->cs);
-        push(regs->eip + insn_len);
+        push(regs->rip + insn_len);
 #undef push
-        regs->esp = esp;
+        regs->rsp = esp;
         regs->ss = ss;
     }
     else
@@ -3811,7 +3811,7 @@ void do_general_protection(struct cpu_us
         ti = &v->arch.pv_vcpu.trap_ctxt[vector];
         if ( permit_softint(TI_GET_DPL(ti), v, regs) )
         {
-            regs->eip += 2;
+            regs->rip += 2;
             do_guest_trap(vector, regs);
             return;
         }
@@ -3826,7 +3826,7 @@ void do_general_protection(struct cpu_us
     if ( (regs->error_code == 0) &&
          emulate_privileged_op(regs) )
     {
-        trace_trap_one_addr(TRC_PV_EMULATE_PRIVOP, regs->eip);
+        trace_trap_one_addr(TRC_PV_EMULATE_PRIVOP, regs->rip);
         return;
     }
 
@@ -3836,12 +3836,12 @@ void do_general_protection(struct cpu_us
 
  gp_in_kernel:
 
-    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
+    if ( likely((fixup = search_exception_table(regs->rip)) != 0) )
     {
         dprintk(XENLOG_INFO, "GPF (%04x): %p -> %p\n",
-                regs->error_code, _p(regs->eip), _p(fixup));
-        this_cpu(last_extable_addr) = regs->eip;
-        regs->eip = fixup;
+                regs->error_code, _p(regs->rip), _p(fixup));
+        this_cpu(last_extable_addr) = regs->rip;
+        regs->rip = fixup;
         return;
     }
 
@@ -4091,20 +4091,20 @@ void do_debug(struct cpu_user_regs *regs
 
     if ( !guest_mode(regs) )
     {
-        if ( regs->eflags & X86_EFLAGS_TF )
+        if ( regs->_eflags & X86_EFLAGS_TF )
         {
             /* In SYSENTER entry path we can't zap TF until EFLAGS is saved. */
             if ( (regs->rip >= (unsigned long)sysenter_entry) &&
                  (regs->rip <= (unsigned long)sysenter_eflags_saved) )
             {
                 if ( regs->rip == (unsigned long)sysenter_eflags_saved )
-                    regs->eflags &= ~X86_EFLAGS_TF;
+                    regs->_eflags &= ~X86_EFLAGS_TF;
                 goto out;
             }
             if ( !debugger_trap_fatal(TRAP_debug, regs) )
             {
                 WARN();
-                regs->eflags &= ~X86_EFLAGS_TF;
+                regs->_eflags &= ~X86_EFLAGS_TF;
             }
         }
         else
@@ -4115,7 +4115,7 @@ void do_debug(struct cpu_user_regs *regs
              * watchpoint set on it. No need to bump EIP; the only faulting
              * trap is an instruction breakpoint, which can't happen to us.
              */
-            WARN_ON(!search_exception_table(regs->eip));
+            WARN_ON(!search_exception_table(regs->rip));
         }
         goto out;
     }

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 10/10] x86/misc: use unambiguous register names
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
                   ` (8 preceding siblings ...)
  2016-12-20 10:42 ` [PATCH 09/10] x86/traps: " Jan Beulich
@ 2016-12-20 10:43 ` Jan Beulich
  2016-12-20 17:34 ` [PATCH 00/10] x86: register renaming (part I) Andrew Cooper
  10 siblings, 0 replies; 22+ messages in thread
From: Jan Beulich @ 2016-12-20 10:43 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

[-- Attachment #1: Type: text/plain, Size: 9556 bytes --]

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -265,7 +265,7 @@ void vpmu_do_interrupt(struct cpu_user_r
             cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
             cmp->ip = cur_regs->rip;
             cmp->sp = cur_regs->rsp;
-            cmp->flags = cur_regs->eflags;
+            cmp->flags = cur_regs->rflags;
             cmp->ss = cur_regs->ss;
             cmp->cs = cur_regs->cs;
             if ( (cmp->cs & 3) > 1 )
@@ -288,7 +288,7 @@ void vpmu_do_interrupt(struct cpu_user_r
 
             r->ip = cur_regs->rip;
             r->sp = cur_regs->rsp;
-            r->flags = cur_regs->eflags;
+            r->flags = cur_regs->rflags;
 
             if ( !has_hvm_container_vcpu(sampled) )
             {
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1044,11 +1044,11 @@ int arch_set_info_guest(
     init_int80_direct_trap(v);
 
     /* IOPL privileges are virtualised. */
-    v->arch.pv_vcpu.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
-    v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
+    v->arch.pv_vcpu.iopl = v->arch.user_regs._eflags & X86_EFLAGS_IOPL;
+    v->arch.user_regs._eflags &= ~X86_EFLAGS_IOPL;
 
     /* Ensure real hardware interrupts are enabled. */
-    v->arch.user_regs.eflags |= X86_EFLAGS_IF;
+    v->arch.user_regs._eflags |= X86_EFLAGS_IF;
 
     if ( !v->is_initialised )
     {
@@ -2235,7 +2235,7 @@ void hypercall_cancel_continuation(void)
     else
     {
         if ( is_pv_vcpu(current) )
-            regs->eip += 2; /* skip re-execute 'syscall' / 'int $xx' */
+            regs->rip += 2; /* skip re-execute 'syscall' / 'int $xx' */
         else
             current->arch.hvm_vcpu.hcall_preempted = 0;
     }
@@ -2264,11 +2264,11 @@ unsigned long hypercall_create_continuat
         struct cpu_user_regs *regs = guest_cpu_user_regs();
         struct vcpu *curr = current;
 
-        regs->eax = op;
+        regs->rax = op;
 
         /* Ensure the hypercall trap instruction is re-executed. */
         if ( is_pv_vcpu(curr) )
-            regs->eip -= 2;  /* re-execute 'syscall' / 'int $xx' */
+            regs->rip -= 2;  /* re-execute 'syscall' / 'int $xx' */
         else
             curr->arch.hvm_vcpu.hcall_preempted = 1;
 
@@ -2297,12 +2297,12 @@ unsigned long hypercall_create_continuat
                 arg = next_arg(p, args);
                 switch ( i )
                 {
-                case 0: regs->ebx = arg; break;
-                case 1: regs->ecx = arg; break;
-                case 2: regs->edx = arg; break;
-                case 3: regs->esi = arg; break;
-                case 4: regs->edi = arg; break;
-                case 5: regs->ebp = arg; break;
+                case 0: regs->rbx = arg; break;
+                case 1: regs->rcx = arg; break;
+                case 2: regs->rdx = arg; break;
+                case 3: regs->rsi = arg; break;
+                case 4: regs->rdi = arg; break;
+                case 5: regs->rbp = arg; break;
                 }
             }
         }
@@ -2372,12 +2372,12 @@ int hypercall_xlat_continuation(unsigned
 
             switch ( i )
             {
-            case 0: reg = &regs->ebx; break;
-            case 1: reg = &regs->ecx; break;
-            case 2: reg = &regs->edx; break;
-            case 3: reg = &regs->esi; break;
-            case 4: reg = &regs->edi; break;
-            case 5: reg = &regs->ebp; break;
+            case 0: reg = &regs->rbx; break;
+            case 1: reg = &regs->rcx; break;
+            case 2: reg = &regs->rdx; break;
+            case 3: reg = &regs->rsi; break;
+            case 4: reg = &regs->rdi; break;
+            case 5: reg = &regs->rbp; break;
             default: BUG(); reg = NULL; break;
             }
             if ( (mask & 1) )
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -1584,10 +1584,10 @@ int __init construct_dom0(
     /*
      * Initial register values:
      *  DS,ES,FS,GS = FLAT_KERNEL_DS
-     *       CS:EIP = FLAT_KERNEL_CS:start_pc
-     *       SS:ESP = FLAT_KERNEL_SS:start_stack
-     *          ESI = start_info
-     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
+     *       CS:rIP = FLAT_KERNEL_CS:start_pc
+     *       SS:rSP = FLAT_KERNEL_SS:start_stack
+     *          rSI = start_info
+     *  [rAX,rBX,rCX,rDX,rDI,rBP,R8-R15 are zero]
      */
     regs = &v->arch.user_regs;
     regs->ds = regs->es = regs->fs = regs->gs =
@@ -1596,10 +1596,10 @@ int __init construct_dom0(
                 FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
     regs->cs = (!is_pv_32bit_domain(d) ?
                 FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
-    regs->eip = parms.virt_entry;
-    regs->esp = vstack_end;
-    regs->esi = vstartinfo_start;
-    regs->eflags = X86_EFLAGS_IF;
+    regs->rip = parms.virt_entry;
+    regs->rsp = vstack_end;
+    regs->rsi = vstartinfo_start;
+    regs->_eflags = X86_EFLAGS_IF;
 
 #ifdef CONFIG_SHADOW_PAGING
     if ( opt_dom0_shadow )
--- a/xen/arch/x86/extable.c
+++ b/xen/arch/x86/extable.c
@@ -98,7 +98,7 @@ search_exception_table(unsigned long add
 unsigned long
 search_pre_exception_table(struct cpu_user_regs *regs)
 {
-    unsigned long addr = (unsigned long)regs->eip;
+    unsigned long addr = regs->rip;
     unsigned long fixup = search_one_extable(
         __start___pre_ex_table, __stop___pre_ex_table-1, addr);
     if ( fixup )
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -146,7 +146,7 @@ void pv_hypercall(struct cpu_user_regs *
 
     ASSERT(guest_kernel_mode(curr, regs));
 
-    eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->eax;
+    eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->rax;
 
     BUILD_BUG_ON(ARRAY_SIZE(pv_hypercall_table) >
                  ARRAY_SIZE(hypercall_args_table));
@@ -154,7 +154,7 @@ void pv_hypercall(struct cpu_user_regs *
     if ( (eax >= ARRAY_SIZE(pv_hypercall_table)) ||
          !pv_hypercall_table[eax].native )
     {
-        regs->eax = -ENOSYS;
+        regs->rax = -ENOSYS;
         return;
     }
 
@@ -186,7 +186,7 @@ void pv_hypercall(struct cpu_user_regs *
             __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args);
         }
 
-        regs->eax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
+        regs->rax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
 
 #ifndef NDEBUG
         if ( regs->rip == old_rip )
--- a/xen/arch/x86/trace.c
+++ b/xen/arch/x86/trace.c
@@ -48,7 +48,7 @@ void __trace_pv_trap(int trapnr, unsigne
 
 void __trace_pv_page_fault(unsigned long addr, unsigned error_code)
 {
-    unsigned long eip = guest_cpu_user_regs()->eip;
+    unsigned long eip = guest_cpu_user_regs()->rip;
 
     if ( is_pv_32bit_vcpu(current) )
     {
@@ -119,7 +119,7 @@ void __trace_trap_two_addr(unsigned even
 
 void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
 {
-    unsigned long eip = guest_cpu_user_regs()->eip;
+    unsigned long eip = guest_cpu_user_regs()->rip;
 
     /* We have a couple of different modes to worry about:
      * - 32-on-32: 32-bit pte, 32-bit virtual addresses
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -44,7 +44,7 @@ void __dummy__(void)
     OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     OFFSET(UREGS_rip, struct cpu_user_regs, rip);
     OFFSET(UREGS_cs, struct cpu_user_regs, cs);
-    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
+    OFFSET(UREGS_eflags, struct cpu_user_regs, rflags);
     OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
     OFFSET(UREGS_ss, struct cpu_user_regs, ss);
     OFFSET(UREGS_ds, struct cpu_user_regs, ds);
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -327,7 +327,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PA
                 struct cpu_user_regs *regs = guest_cpu_user_regs();
                 struct mc_state *mcs = &current->mc_state;
                 unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
-                                    ? regs->ecx
+                                    ? regs->_ecx
                                     : mcs->call.args[1];
                 unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;
 
--- a/xen/include/asm-x86/regs.h
+++ b/xen/include/asm-x86/regs.h
@@ -15,6 +15,6 @@
     (diff == 0);                                                              \
 })
 
-#define return_reg(v) ((v)->arch.user_regs.eax)
+#define return_reg(v) ((v)->arch.user_regs.rax)
 
 #endif /* __X86_REGS_H__ */
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -764,7 +764,7 @@ typedef struct shared_info shared_info_t
  *         (may be omitted)
  *      c. list of allocated page frames [mfn_list, nr_pages]
  *         (unless relocated due to XEN_ELFNOTE_INIT_P2M)
- *      d. start_info_t structure        [register ESI (x86)]
+ *      d. start_info_t structure        [register rSI (x86)]
  *         in case of dom0 this page contains the console info, too
  *      e. unless dom0: xenstore ring page
  *      f. unless dom0: console ring page



[-- Attachment #2: x86-regnames-misc.patch --]
[-- Type: text/plain, Size: 9596 bytes --]

x86/misc: use unambiguous register names

This is in preparation of eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc). Use the
guaranteed 32-bit underscore prefixed names for now where appropriate.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -265,7 +265,7 @@ void vpmu_do_interrupt(struct cpu_user_r
             cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
             cmp->ip = cur_regs->rip;
             cmp->sp = cur_regs->rsp;
-            cmp->flags = cur_regs->eflags;
+            cmp->flags = cur_regs->rflags;
             cmp->ss = cur_regs->ss;
             cmp->cs = cur_regs->cs;
             if ( (cmp->cs & 3) > 1 )
@@ -288,7 +288,7 @@ void vpmu_do_interrupt(struct cpu_user_r
 
             r->ip = cur_regs->rip;
             r->sp = cur_regs->rsp;
-            r->flags = cur_regs->eflags;
+            r->flags = cur_regs->rflags;
 
             if ( !has_hvm_container_vcpu(sampled) )
             {
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1044,11 +1044,11 @@ int arch_set_info_guest(
     init_int80_direct_trap(v);
 
     /* IOPL privileges are virtualised. */
-    v->arch.pv_vcpu.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
-    v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
+    v->arch.pv_vcpu.iopl = v->arch.user_regs._eflags & X86_EFLAGS_IOPL;
+    v->arch.user_regs._eflags &= ~X86_EFLAGS_IOPL;
 
     /* Ensure real hardware interrupts are enabled. */
-    v->arch.user_regs.eflags |= X86_EFLAGS_IF;
+    v->arch.user_regs._eflags |= X86_EFLAGS_IF;
 
     if ( !v->is_initialised )
     {
@@ -2235,7 +2235,7 @@ void hypercall_cancel_continuation(void)
     else
     {
         if ( is_pv_vcpu(current) )
-            regs->eip += 2; /* skip re-execute 'syscall' / 'int $xx' */
+            regs->rip += 2; /* skip re-execute 'syscall' / 'int $xx' */
         else
             current->arch.hvm_vcpu.hcall_preempted = 0;
     }
@@ -2264,11 +2264,11 @@ unsigned long hypercall_create_continuat
         struct cpu_user_regs *regs = guest_cpu_user_regs();
         struct vcpu *curr = current;
 
-        regs->eax = op;
+        regs->rax = op;
 
         /* Ensure the hypercall trap instruction is re-executed. */
         if ( is_pv_vcpu(curr) )
-            regs->eip -= 2;  /* re-execute 'syscall' / 'int $xx' */
+            regs->rip -= 2;  /* re-execute 'syscall' / 'int $xx' */
         else
             curr->arch.hvm_vcpu.hcall_preempted = 1;
 
@@ -2297,12 +2297,12 @@ unsigned long hypercall_create_continuat
                 arg = next_arg(p, args);
                 switch ( i )
                 {
-                case 0: regs->ebx = arg; break;
-                case 1: regs->ecx = arg; break;
-                case 2: regs->edx = arg; break;
-                case 3: regs->esi = arg; break;
-                case 4: regs->edi = arg; break;
-                case 5: regs->ebp = arg; break;
+                case 0: regs->rbx = arg; break;
+                case 1: regs->rcx = arg; break;
+                case 2: regs->rdx = arg; break;
+                case 3: regs->rsi = arg; break;
+                case 4: regs->rdi = arg; break;
+                case 5: regs->rbp = arg; break;
                 }
             }
         }
@@ -2372,12 +2372,12 @@ int hypercall_xlat_continuation(unsigned
 
             switch ( i )
             {
-            case 0: reg = &regs->ebx; break;
-            case 1: reg = &regs->ecx; break;
-            case 2: reg = &regs->edx; break;
-            case 3: reg = &regs->esi; break;
-            case 4: reg = &regs->edi; break;
-            case 5: reg = &regs->ebp; break;
+            case 0: reg = &regs->rbx; break;
+            case 1: reg = &regs->rcx; break;
+            case 2: reg = &regs->rdx; break;
+            case 3: reg = &regs->rsi; break;
+            case 4: reg = &regs->rdi; break;
+            case 5: reg = &regs->rbp; break;
             default: BUG(); reg = NULL; break;
             }
             if ( (mask & 1) )
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -1584,10 +1584,10 @@ int __init construct_dom0(
     /*
      * Initial register values:
      *  DS,ES,FS,GS = FLAT_KERNEL_DS
-     *       CS:EIP = FLAT_KERNEL_CS:start_pc
-     *       SS:ESP = FLAT_KERNEL_SS:start_stack
-     *          ESI = start_info
-     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
+     *       CS:rIP = FLAT_KERNEL_CS:start_pc
+     *       SS:rSP = FLAT_KERNEL_SS:start_stack
+     *          rSI = start_info
+     *  [rAX,rBX,rCX,rDX,rDI,rBP,R8-R15 are zero]
      */
     regs = &v->arch.user_regs;
     regs->ds = regs->es = regs->fs = regs->gs =
@@ -1596,10 +1596,10 @@ int __init construct_dom0(
                 FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
     regs->cs = (!is_pv_32bit_domain(d) ?
                 FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
-    regs->eip = parms.virt_entry;
-    regs->esp = vstack_end;
-    regs->esi = vstartinfo_start;
-    regs->eflags = X86_EFLAGS_IF;
+    regs->rip = parms.virt_entry;
+    regs->rsp = vstack_end;
+    regs->rsi = vstartinfo_start;
+    regs->_eflags = X86_EFLAGS_IF;
 
 #ifdef CONFIG_SHADOW_PAGING
     if ( opt_dom0_shadow )
--- a/xen/arch/x86/extable.c
+++ b/xen/arch/x86/extable.c
@@ -98,7 +98,7 @@ search_exception_table(unsigned long add
 unsigned long
 search_pre_exception_table(struct cpu_user_regs *regs)
 {
-    unsigned long addr = (unsigned long)regs->eip;
+    unsigned long addr = regs->rip;
     unsigned long fixup = search_one_extable(
         __start___pre_ex_table, __stop___pre_ex_table-1, addr);
     if ( fixup )
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -146,7 +146,7 @@ void pv_hypercall(struct cpu_user_regs *
 
     ASSERT(guest_kernel_mode(curr, regs));
 
-    eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->eax;
+    eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->rax;
 
     BUILD_BUG_ON(ARRAY_SIZE(pv_hypercall_table) >
                  ARRAY_SIZE(hypercall_args_table));
@@ -154,7 +154,7 @@ void pv_hypercall(struct cpu_user_regs *
     if ( (eax >= ARRAY_SIZE(pv_hypercall_table)) ||
          !pv_hypercall_table[eax].native )
     {
-        regs->eax = -ENOSYS;
+        regs->rax = -ENOSYS;
         return;
     }
 
@@ -186,7 +186,7 @@ void pv_hypercall(struct cpu_user_regs *
             __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args);
         }
 
-        regs->eax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
+        regs->rax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
 
 #ifndef NDEBUG
         if ( regs->rip == old_rip )
--- a/xen/arch/x86/trace.c
+++ b/xen/arch/x86/trace.c
@@ -48,7 +48,7 @@ void __trace_pv_trap(int trapnr, unsigne
 
 void __trace_pv_page_fault(unsigned long addr, unsigned error_code)
 {
-    unsigned long eip = guest_cpu_user_regs()->eip;
+    unsigned long eip = guest_cpu_user_regs()->rip;
 
     if ( is_pv_32bit_vcpu(current) )
     {
@@ -119,7 +119,7 @@ void __trace_trap_two_addr(unsigned even
 
 void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
 {
-    unsigned long eip = guest_cpu_user_regs()->eip;
+    unsigned long eip = guest_cpu_user_regs()->rip;
 
     /* We have a couple of different modes to worry about:
      * - 32-on-32: 32-bit pte, 32-bit virtual addresses
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -44,7 +44,7 @@ void __dummy__(void)
     OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     OFFSET(UREGS_rip, struct cpu_user_regs, rip);
     OFFSET(UREGS_cs, struct cpu_user_regs, cs);
-    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
+    OFFSET(UREGS_eflags, struct cpu_user_regs, rflags);
     OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
     OFFSET(UREGS_ss, struct cpu_user_regs, ss);
     OFFSET(UREGS_ds, struct cpu_user_regs, ds);
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -327,7 +327,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PA
                 struct cpu_user_regs *regs = guest_cpu_user_regs();
                 struct mc_state *mcs = &current->mc_state;
                 unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
-                                    ? regs->ecx
+                                    ? regs->_ecx
                                     : mcs->call.args[1];
                 unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;
 
--- a/xen/include/asm-x86/regs.h
+++ b/xen/include/asm-x86/regs.h
@@ -15,6 +15,6 @@
     (diff == 0);                                                              \
 })
 
-#define return_reg(v) ((v)->arch.user_regs.eax)
+#define return_reg(v) ((v)->arch.user_regs.rax)
 
 #endif /* __X86_REGS_H__ */
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -764,7 +764,7 @@ typedef struct shared_info shared_info_t
  *         (may be omitted)
  *      c. list of allocated page frames [mfn_list, nr_pages]
  *         (unless relocated due to XEN_ELFNOTE_INIT_P2M)
- *      d. start_info_t structure        [register ESI (x86)]
+ *      d. start_info_t structure        [register rSI (x86)]
  *         in case of dom0 this page contains the console info, too
  *      e. unless dom0: xenstore ring page
  *      f. unless dom0: console ring page

[-- Attachment #3: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 03/10] x86/shadow: use unambiguous register names
  2016-12-20 10:38 ` [PATCH 03/10] x86/shadow: " Jan Beulich
@ 2016-12-20 11:04   ` Tim Deegan
  0 siblings, 0 replies; 22+ messages in thread
From: Tim Deegan @ 2016-12-20 11:04 UTC (permalink / raw)
  To: Jan Beulich; +Cc: George Dunlap, xen-devel, Andrew Cooper

At 03:38 -0700 on 20 Dec (1482205097), Jan Beulich wrote:
> This is in preparation of eliminating the mis-naming of 64-bit fields
> with 32-bit register names (eflags instead of rflags etc).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Tim Deegan <tim@xen.org>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 08/10] x86/vm-event: use unambiguous register names
  2016-12-20 10:42 ` [PATCH 08/10] x86/vm-event: " Jan Beulich
@ 2016-12-20 17:30   ` Tamas K Lengyel
  2016-12-22 16:08   ` Razvan Cojocaru
  1 sibling, 0 replies; 22+ messages in thread
From: Tamas K Lengyel @ 2016-12-20 17:30 UTC (permalink / raw)
  To: Jan Beulich; +Cc: George Dunlap, xen-devel, Razvan Cojocaru, Andrew Cooper


[-- Attachment #1.1: Type: text/plain, Size: 296 bytes --]

2016-12-20 3:42 GMT-07:00 Jan Beulich <JBeulich@suse.com>:

> This is in preparation of eliminating the mis-naming of 64-bit fields
> with 32-bit register names (eflags instead of rflags etc).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>

 Acked-by: Tamas K Lengyel <tamas@tklengyel.com>

[-- Attachment #1.2: Type: text/html, Size: 732 bytes --]

[-- Attachment #2: Type: text/plain, Size: 127 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 00/10] x86: register renaming (part I)
  2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
                   ` (9 preceding siblings ...)
  2016-12-20 10:43 ` [PATCH 10/10] x86/misc: " Jan Beulich
@ 2016-12-20 17:34 ` Andrew Cooper
  10 siblings, 0 replies; 22+ messages in thread
From: Andrew Cooper @ 2016-12-20 17:34 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap

On 20/12/2016 09:55, Jan Beulich wrote:
> This is a first (of three, as far as current plans go) steps to do away
> with misleading register names (eax instead of rax).
>
> 01: x86/MSR: introduce MSR access split/fold helpers
> 02: x86/guest-walk: use unambiguous register names
> 03: x86/shadow: use unambiguous register names
> 04: x86/oprofile: use unambiguous register names
> 05: x86/HVM: use unambiguous register names
> 06: x86/HVMemul: use unambiguous register names
> 07: x86/SVM: use unambiguous register names
> (VMX counterpart omitted for now, as I'll need to re-base)
> 08: x86/vm-event: use unambiguous register names
> 09: x86/traps: use unambiguous register names
> 10: x86/misc: use unambiguous register names
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>

I haven't looked at these carefully, but they all seem to be sensible
mechanical changes, so

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 05/10] x86/HVM: use unambiguous register names
  2016-12-20 10:39 ` [PATCH 05/10] x86/HVM: " Jan Beulich
@ 2016-12-20 17:39   ` Andrew Cooper
  2016-12-21 15:31     ` Jan Beulich
  0 siblings, 1 reply; 22+ messages in thread
From: Andrew Cooper @ 2016-12-20 17:39 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap

On 20/12/2016 10:39, Jan Beulich wrote:
> @@ -3032,16 +3032,16 @@ void hvm_task_switch(
>      if ( hvm_set_cr3(tss.cr3, 1) )
>          goto out;
>  
> -    regs->eip    = tss.eip;
> -    regs->eflags = tss.eflags | 2;
> -    regs->eax    = tss.eax;
> -    regs->ecx    = tss.ecx;
> -    regs->edx    = tss.edx;
> -    regs->ebx    = tss.ebx;
> -    regs->esp    = tss.esp;
> -    regs->ebp    = tss.ebp;
> -    regs->esi    = tss.esi;
> -    regs->edi    = tss.edi;
> +    regs->rip    = tss.eip;
> +    regs->rflags = tss.eflags | 2;

As you are modifying this anyway, mind avoiding this opencoding?

~Andrew

> +    regs->rax    = tss.eax;
> +    regs->rcx    = tss.ecx;
> +    regs->rdx    = tss.edx;
> +    regs->rbx    = tss.ebx;
> +    regs->rsp    = tss.esp;
> +    regs->rbp    = tss.ebp;
> +    regs->rsi    = tss.esi;
> +    regs->rdi    = tss.edi;
>  
>      exn_raised = 0;
>      if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 05/10] x86/HVM: use unambiguous register names
  2016-12-20 17:39   ` Andrew Cooper
@ 2016-12-21 15:31     ` Jan Beulich
  0 siblings, 0 replies; 22+ messages in thread
From: Jan Beulich @ 2016-12-21 15:31 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: George Dunlap, xen-devel

>>> On 20.12.16 at 18:39, <andrew.cooper3@citrix.com> wrote:
> On 20/12/2016 10:39, Jan Beulich wrote:
>> @@ -3032,16 +3032,16 @@ void hvm_task_switch(
>>      if ( hvm_set_cr3(tss.cr3, 1) )
>>          goto out;
>>  
>> -    regs->eip    = tss.eip;
>> -    regs->eflags = tss.eflags | 2;
>> -    regs->eax    = tss.eax;
>> -    regs->ecx    = tss.ecx;
>> -    regs->edx    = tss.edx;
>> -    regs->ebx    = tss.ebx;
>> -    regs->esp    = tss.esp;
>> -    regs->ebp    = tss.ebp;
>> -    regs->esi    = tss.esi;
>> -    regs->edi    = tss.edi;
>> +    regs->rip    = tss.eip;
>> +    regs->rflags = tss.eflags | 2;
> 
> As you are modifying this anyway, mind avoiding this opencoding?

Oh, sure - should have paid attention myself.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 08/10] x86/vm-event: use unambiguous register names
  2016-12-20 10:42 ` [PATCH 08/10] x86/vm-event: " Jan Beulich
  2016-12-20 17:30   ` Tamas K Lengyel
@ 2016-12-22 16:08   ` Razvan Cojocaru
  1 sibling, 0 replies; 22+ messages in thread
From: Razvan Cojocaru @ 2016-12-22 16:08 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap, Andrew Cooper, tamas

On 12/20/2016 12:42 PM, Jan Beulich wrote:
> This is in preparation of eliminating the mis-naming of 64-bit fields
> with 32-bit register names (eflags instead of rflags etc).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>


Thanks,
Razvan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 01/10] x86/MSR: introduce MSR access split/fold helpers
  2016-12-20 10:36 ` [PATCH 01/10] x86/MSR: introduce MSR access split/fold helpers Jan Beulich
@ 2016-12-23  6:17   ` Tian, Kevin
  2016-12-26  4:54     ` Suravee Suthikulpanit
  0 siblings, 1 reply; 22+ messages in thread
From: Tian, Kevin @ 2016-12-23  6:17 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: George Dunlap, Andrew Cooper, Boris Ostrovsky, Nakajima, Jun,
	Suravee Suthikulpanit

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Tuesday, December 20, 2016 6:36 PM
> 
> This is in preparation of eliminating the mis-naming of 64-bit fields
> with 32-bit register names (eflags instead of rflags etc). Use the
> guaranteed 32-bit underscore prefixed names for now where appropriate.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 01/10] x86/MSR: introduce MSR access split/fold helpers
  2016-12-23  6:17   ` Tian, Kevin
@ 2016-12-26  4:54     ` Suravee Suthikulpanit
  0 siblings, 0 replies; 22+ messages in thread
From: Suravee Suthikulpanit @ 2016-12-26  4:54 UTC (permalink / raw)
  To: Tian, Kevin, Jan Beulich, xen-devel
  Cc: George Dunlap, Andrew Cooper, Boris Ostrovsky, Nakajima, Jun



On 12/23/16 13:17, Tian, Kevin wrote:
>> From: Jan Beulich [mailto:JBeulich@suse.com]
>> Sent: Tuesday, December 20, 2016 6:36 PM
>>
>> This is in preparation of eliminating the mis-naming of 64-bit fields
>> with 32-bit register names (eflags instead of rflags etc). Use the
>> guaranteed 32-bit underscore prefixed names for now where appropriate.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>
>
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
>

Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 07/10] x86/SVM: use unambiguous register names
  2016-12-20 10:41 ` [PATCH 07/10] x86/SVM: " Jan Beulich
@ 2016-12-26  5:46   ` Suravee Suthikulpanit
  0 siblings, 0 replies; 22+ messages in thread
From: Suravee Suthikulpanit @ 2016-12-26  5:46 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap, Andrew Cooper, Boris Ostrovsky



On 12/20/16 17:41, Jan Beulich wrote:
> This is in preparation of eliminating the mis-naming of 64-bit fields
> with 32-bit register names (eflags instead of rflags etc). Use the
> guaranteed 32-bit underscore prefixed names for now where appropriate.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 02/10] x86/guest-walk: use unambiguous register names
  2016-12-20 10:36 ` [PATCH 02/10] x86/guest-walk: use unambiguous register names Jan Beulich
@ 2016-12-28 11:18   ` George Dunlap
  2016-12-28 13:53     ` Jan Beulich
  0 siblings, 1 reply; 22+ messages in thread
From: George Dunlap @ 2016-12-28 11:18 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Andrew Cooper

On Tue, Dec 20, 2016 at 10:36 AM, Jan Beulich <JBeulich@suse.com> wrote:
> This is in preparation of eliminating the mis-naming of 64-bit fields
> with 32-bit register names (eflags instead of rflags etc). Use the
> guaranteed 32-bit underscore prefixed names for now where appropriate.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Any particular reason to use _eflags rather than just using rflags here?

Either way:

Acked-by: George Dunlap <george.dunlap@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 02/10] x86/guest-walk: use unambiguous register names
  2016-12-28 11:18   ` George Dunlap
@ 2016-12-28 13:53     ` Jan Beulich
  0 siblings, 0 replies; 22+ messages in thread
From: Jan Beulich @ 2016-12-28 13:53 UTC (permalink / raw)
  To: George.Dunlap; +Cc: andrew.cooper3, xen-devel

>>> George Dunlap <George.Dunlap@eu.citrix.com> 12/28/16 12:18 PM >>>
>On Tue, Dec 20, 2016 at 10:36 AM, Jan Beulich <JBeulich@suse.com> wrote:
>> This is in preparation of eliminating the mis-naming of 64-bit fields
>> with 32-bit register names (eflags instead of rflags etc). Use the
>> guaranteed 32-bit underscore prefixed names for now where appropriate.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
>Any particular reason to use _eflags rather than just using rflags here?

32-bit accesses are shorter instruction byte wise on average (not requiring REX.W).
And the ultimate goal is to switch to eflags (without the underscore here), just that
this can't be easily done in a single step (hence the "part I" in the title of the
overview mail). And using _eflags now allows us to easily find such uses (once
that field name goes away), whereas rflags is to stay (and hence would need to be
grep-ed for instead of the compiler pointing out any leftover uses).

>Either way:
>
>Acked-by: George Dunlap <george.dunlap@citrix.com>

Thanks.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2016-12-28 13:53 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-12-20  9:55 [PATCH 00/10] x86: register renaming (part I) Jan Beulich
2016-12-20 10:36 ` [PATCH 01/10] x86/MSR: introduce MSR access split/fold helpers Jan Beulich
2016-12-23  6:17   ` Tian, Kevin
2016-12-26  4:54     ` Suravee Suthikulpanit
2016-12-20 10:36 ` [PATCH 02/10] x86/guest-walk: use unambiguous register names Jan Beulich
2016-12-28 11:18   ` George Dunlap
2016-12-28 13:53     ` Jan Beulich
2016-12-20 10:38 ` [PATCH 03/10] x86/shadow: " Jan Beulich
2016-12-20 11:04   ` Tim Deegan
2016-12-20 10:39 ` [PATCH 04/10] x86/oprofile: " Jan Beulich
2016-12-20 10:39 ` [PATCH 05/10] x86/HVM: " Jan Beulich
2016-12-20 17:39   ` Andrew Cooper
2016-12-21 15:31     ` Jan Beulich
2016-12-20 10:40 ` [PATCH 06/10] x86/HVMemul: " Jan Beulich
2016-12-20 10:41 ` [PATCH 07/10] x86/SVM: " Jan Beulich
2016-12-26  5:46   ` Suravee Suthikulpanit
2016-12-20 10:42 ` [PATCH 08/10] x86/vm-event: " Jan Beulich
2016-12-20 17:30   ` Tamas K Lengyel
2016-12-22 16:08   ` Razvan Cojocaru
2016-12-20 10:42 ` [PATCH 09/10] x86/traps: " Jan Beulich
2016-12-20 10:43 ` [PATCH 10/10] x86/misc: " Jan Beulich
2016-12-20 17:34 ` [PATCH 00/10] x86: register renaming (part I) Andrew Cooper

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.