* [PATCH 0/3] x86: further is_..._...() adjustments
@ 2015-06-23 15:14 Jan Beulich
  2015-06-23 15:18 ` [PATCH 1/3] x86: drop is_pv_32on64_vcpu() Jan Beulich
                   ` (2 more replies)
  0 siblings, 3 replies; 14+ messages in thread
From: Jan Beulich @ 2015-06-23 15:14 UTC (permalink / raw)
  To: xen-devel
  Cc: Keir Fraser, George Dunlap, Andrew Cooper, Ian Jackson,
	Tim Deegan, Ian Campbell

1: drop is_pv_32on64_vcpu()
2: drop is_pv_32on64_domain()
3: use is_..._vcpu() instead of open coding it

Some of this extends into common code, hence the wider Cc list.

Signed-off-by: Jan Beulich <jbeulich@suse.com>


* [PATCH 1/3] x86: drop is_pv_32on64_vcpu()
  2015-06-23 15:14 [PATCH 0/3] x86: further is_..._...() adjustments Jan Beulich
@ 2015-06-23 15:18 ` Jan Beulich
  2015-06-23 15:39   ` Andrew Cooper
  2015-06-24 21:35   ` Boris Ostrovsky
  2015-06-23 15:19 ` [PATCH 2/3] x86: drop is_pv_32on64_domain() Jan Beulich
  2015-06-23 15:20 ` [PATCH 3/3] x86/mm: use is_..._vcpu() instead of open coding it Jan Beulich
  2 siblings, 2 replies; 14+ messages in thread
From: Jan Beulich @ 2015-06-23 15:18 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser


... as being identical to is_pv_32bit_vcpu() after the x86-32 removal.

In a few cases this includes an additional is_pv_32bit_vcpu() ->
is_pv_32bit_domain() conversion.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
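
For reference (not part of the patch), a minimal self-contained sketch of
why the two predicates are interchangeable. The macros are copied from the
asm-x86/domain.h hunk below; the struct layouts are simplified stand-ins
for the real Xen ones:

#include <stdbool.h>

/* Simplified stand-ins for Xen's struct domain / struct vcpu. */
struct arch_domain { bool is_32bit_pv; };
struct domain      { struct arch_domain arch; };
struct vcpu        { struct domain *domain; };

/* Macros as defined in xen/include/asm-x86/domain.h: */
#define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
/* The alias dropped by this patch expands to exactly the same expression: */
#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))

The additional is_pv_32bit_vcpu() -> is_pv_32bit_domain(currd) conversions
in the traps.c hunks are equivalent provided currd is v's domain at those
call sites (v being current there).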

--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -1339,7 +1339,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
             mctelem_cookie_t cookie = ID2COOKIE(mc_fetch.nat->fetch_id);
             mctelem_ack(which, cookie);
         } else {
-            if (!is_pv_32on64_vcpu(v)
+            if (!is_pv_32bit_vcpu(v)
                 ? guest_handle_is_null(mc_fetch.nat->data)
                 : compat_handle_is_null(mc_fetch.cmp->data))
                 return x86_mcerr("do_mca fetch: guest buffer "
@@ -1347,7 +1347,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
 
             if ((mctc = mctelem_consume_oldest_begin(which))) {
                 struct mc_info *mcip = mctelem_dataptr(mctc);
-                if (!is_pv_32on64_vcpu(v)
+                if (!is_pv_32bit_vcpu(v)
                     ? copy_to_guest(mc_fetch.nat->data, mcip, 1)
                     : copy_to_compat(mc_fetch.cmp->data,
                                      mcip, 1)) {
@@ -1378,7 +1378,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
         mc_physcpuinfo.nat = &op->u.mc_physcpuinfo;
         nlcpu = num_online_cpus();
 
-        if (!is_pv_32on64_vcpu(v)
+        if (!is_pv_32bit_vcpu(v)
             ? !guest_handle_is_null(mc_physcpuinfo.nat->info)
             : !compat_handle_is_null(mc_physcpuinfo.cmp->info)) {
             if (mc_physcpuinfo.nat->ncpus <= 0)
@@ -1389,7 +1389,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
             if (log_cpus == NULL)
                 return x86_mcerr("do_mca cpuinfo", -ENOMEM);
             on_each_cpu(do_mc_get_cpu_info, log_cpus, 1);
-            if (!is_pv_32on64_vcpu(v)
+            if (!is_pv_32bit_vcpu(v)
                 ? copy_to_guest(mc_physcpuinfo.nat->info,
                                 log_cpus, nlcpu)
                 : copy_to_compat(mc_physcpuinfo.cmp->info,
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -496,7 +496,7 @@ int vcpu_initialise(struct vcpu *v)
 
 void vcpu_destroy(struct vcpu *v)
 {
-    if ( is_pv_32on64_vcpu(v) )
+    if ( is_pv_32bit_vcpu(v) )
         release_compat_l4(v);
 
     vcpu_destroy_fpu(v);
@@ -1705,7 +1705,7 @@ unsigned long hypercall_create_continuat
             curr->arch.hvm_vcpu.hcall_preempted = 1;
 
         if ( is_pv_vcpu(curr) ?
-             !is_pv_32on64_vcpu(curr) :
+             !is_pv_32bit_vcpu(curr) :
              curr->arch.hvm_vcpu.hcall_64bit )
         {
             for ( i = 0; *p != '\0'; i++ )
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2652,7 +2652,7 @@ int vcpu_destroy_pagetables(struct vcpu 
     if ( rc )
         return rc;
 
-    if ( is_pv_32on64_vcpu(v) )
+    if ( is_pv_32bit_vcpu(v) )
     {
         l4tab = map_domain_page(mfn);
         mfn = l4e_get_pfn(*l4tab);
--- a/xen/arch/x86/trace.c
+++ b/xen/arch/x86/trace.c
@@ -11,7 +11,7 @@ void __trace_hypercall_entry(void)
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     unsigned long args[6];
 
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         args[0] = regs->ebx;
         args[1] = regs->ecx;
@@ -36,7 +36,7 @@ void __trace_hypercall_entry(void)
 void __trace_pv_trap(int trapnr, unsigned long eip,
                      int use_error_code, unsigned error_code)
 {
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         struct __packed {
             unsigned eip:32,
@@ -77,7 +77,7 @@ void __trace_pv_page_fault(unsigned long
 {
     unsigned long eip = guest_cpu_user_regs()->eip;
 
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         struct __packed {
             u32 eip, addr, error_code;
@@ -108,7 +108,7 @@ void __trace_pv_page_fault(unsigned long
 
 void __trace_trap_one_addr(unsigned event, unsigned long va)
 {
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         u32 d = va;
         __trace_var(event, 1, sizeof(d), &d);
@@ -123,7 +123,7 @@ void __trace_trap_one_addr(unsigned even
 void __trace_trap_two_addr(unsigned event, unsigned long va1,
                            unsigned long va2)
 {
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         struct __packed {
             u32 va1, va2;
@@ -156,7 +156,7 @@ void __trace_ptwr_emulation(unsigned lon
      * cases, "unsigned long" is the size of a guest virtual address.
      */
 
-    if ( is_pv_32on64_vcpu(current) )
+    if ( is_pv_32bit_vcpu(current) )
     {
         struct __packed {
             l1_pgentry_t pte;
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -124,7 +124,7 @@ static void show_guest_stack(struct vcpu
     if ( is_hvm_vcpu(v) )
         return;
 
-    if ( is_pv_32on64_vcpu(v) )
+    if ( is_pv_32bit_vcpu(v) )
     {
         compat_show_guest_stack(v, regs, debug_stack_lines);
         return;
@@ -2382,7 +2382,7 @@ static int emulate_privileged_op(struct 
         {
             unsigned long mfn;
             
-            if ( !is_pv_32on64_vcpu(v) )
+            if ( !is_pv_32bit_domain(currd) )
             {
                 mfn = pagetable_get_pfn(v->arch.guest_table);
                 *reg = xen_pfn_to_cr3(mfn_to_gmfn(currd, mfn));
@@ -2452,7 +2452,7 @@ static int emulate_privileged_op(struct 
             unsigned long gfn;
             struct page_info *page;
 
-            gfn = !is_pv_32on64_vcpu(v)
+            gfn = !is_pv_32bit_domain(currd)
                 ? xen_cr3_to_pfn(*reg) : compat_cr3_to_pfn(*reg);
             page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
             if ( page )
@@ -2504,19 +2504,19 @@ static int emulate_privileged_op(struct 
         switch ( regs->_ecx )
         {
         case MSR_FS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             wrfsbase(msr_content);
             v->arch.pv_vcpu.fs_base = msr_content;
             break;
         case MSR_GS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             wrgsbase(msr_content);
             v->arch.pv_vcpu.gs_base_kernel = msr_content;
             break;
         case MSR_SHADOW_GS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
                 goto fail;
@@ -2675,18 +2675,18 @@ static int emulate_privileged_op(struct 
         switch ( regs->_ecx )
         {
         case MSR_FS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             val = cpu_has_fsgsbase ? __rdfsbase() : v->arch.pv_vcpu.fs_base;
             goto rdmsr_writeback;
         case MSR_GS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             val = cpu_has_fsgsbase ? __rdgsbase()
                                    : v->arch.pv_vcpu.gs_base_kernel;
             goto rdmsr_writeback;
         case MSR_SHADOW_GS_BASE:
-            if ( is_pv_32on64_vcpu(v) )
+            if ( is_pv_32bit_domain(currd) )
                 goto fail;
             val = v->arch.pv_vcpu.gs_base_user;
             goto rdmsr_writeback;
@@ -3201,7 +3201,7 @@ void do_general_protection(struct cpu_us
             return;
         }
     }
-    else if ( is_pv_32on64_vcpu(v) && regs->error_code )
+    else if ( is_pv_32bit_vcpu(v) && regs->error_code )
     {
         emulate_gate_op(regs);
         return;
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -15,7 +15,6 @@
 #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
 #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
 #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
-#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))
 
 #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
         d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
--- a/xen/include/asm-x86/ldt.h
+++ b/xen/include/asm-x86/ldt.h
@@ -15,7 +15,7 @@ static inline void load_LDT(struct vcpu 
     }
     else
     {
-        desc = (!is_pv_32on64_vcpu(v)
+        desc = (!is_pv_32bit_vcpu(v)
                 ? this_cpu(gdt_table) : this_cpu(compat_gdt_table))
                + LDT_ENTRY - FIRST_RESERVED_GDT_ENTRY;
         _set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, SYS_DESC_ldt);
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -285,7 +285,7 @@ static inline int tmem_get_tmemop_from_c
 #ifdef CONFIG_COMPAT
     if ( has_hvm_container_vcpu(current) ?
          hvm_guest_x86_mode(current) != 8 :
-         is_pv_32on64_vcpu(current) )
+         is_pv_32bit_vcpu(current) )
     {
         int rc;
         enum XLAT_tmem_op_u u;




* [PATCH 2/3] x86: drop is_pv_32on64_domain()
  2015-06-23 15:14 [PATCH 0/3] x86: further is_..._...() adjustments Jan Beulich
  2015-06-23 15:18 ` [PATCH 1/3] x86: drop is_pv_32on64_vcpu() Jan Beulich
@ 2015-06-23 15:19 ` Jan Beulich
  2015-06-23 15:43   ` Andrew Cooper
                     ` (2 more replies)
  2015-06-23 15:20 ` [PATCH 3/3] x86/mm: use is_..._vcpu() instead of open coding it Jan Beulich
  2 siblings, 3 replies; 14+ messages in thread
From: Jan Beulich @ 2015-06-23 15:19 UTC (permalink / raw)
  To: xen-devel
  Cc: Keir Fraser, George Dunlap, Andrew Cooper, Ian Jackson,
	Tim Deegan, Ian Campbell


... as being identical to is_pv_32bit_domain() after the x86-32
removal.

In a few cases this includes no longer open-coding is_pv_32bit_vcpu().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
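
To make the "no longer open-coding" part concrete, here is a minimal sketch
(not part of the patch) of the call-site cleanup. The pattern follows the
save_segments() hunk below; the struct layouts and the two helper function
names are simplified, hypothetical stand-ins:

#include <stdbool.h>

struct arch_domain { bool is_32bit_pv; };          /* stand-in */
struct domain      { struct arch_domain arch; };   /* stand-in */
struct vcpu        { struct domain *domain; };     /* stand-in */

#define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d)) /* alias this patch removes */

/* Before: the vcpu-level check open-coded by reaching through v->domain. */
bool compat_segments_needed_old(const struct vcpu *v)
{
    return is_pv_32on64_domain(v->domain);
}

/* After: the existing vcpu-level predicate used directly. */
bool compat_segments_needed_new(const struct vcpu *v)
{
    return is_pv_32bit_vcpu(v);
}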

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -367,7 +367,7 @@ int switch_native(struct domain *d)
 
     if ( !may_switch_mode(d) )
         return -EACCES;
-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
         return 0;
 
     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
@@ -392,7 +392,7 @@ int switch_compat(struct domain *d)
 
     if ( !may_switch_mode(d) )
         return -EACCES;
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         return 0;
 
     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
@@ -481,7 +481,7 @@ int vcpu_initialise(struct vcpu *v)
 
     v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
 
-    rc = is_pv_32on64_domain(d) ? setup_compat_l4(v) : 0;
+    rc = is_pv_32bit_domain(d) ? setup_compat_l4(v) : 0;
  done:
     if ( rc )
     {
@@ -689,7 +689,7 @@ unsigned long pv_guest_cr4_fixup(const s
     hv_cr4_mask = ~X86_CR4_TSD;
     if ( cpu_has_de )
         hv_cr4_mask &= ~X86_CR4_DE;
-    if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) )
+    if ( cpu_has_fsgsbase && !is_pv_32bit_vcpu(v) )
         hv_cr4_mask &= ~X86_CR4_FSGSBASE;
     if ( cpu_has_xsave )
         hv_cr4_mask &= ~X86_CR4_OSXSAVE;
@@ -721,7 +721,7 @@ int arch_set_info_guest(
 
     /* The context is a compat-mode one if the target domain is compat-mode;
      * we expect the tools to DTRT even in compat-mode callers. */
-    compat = is_pv_32on64_domain(d);
+    compat = is_pv_32bit_domain(d);
 
 #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
     flags = c(flags);
@@ -1195,7 +1195,7 @@ static void load_segments(struct vcpu *n
             all_segs_okay &= loadsegment(gs, uregs->gs);
     }
 
-    if ( !is_pv_32on64_domain(n->domain) )
+    if ( !is_pv_32bit_vcpu(n) )
     {
         /* This can only be non-zero if selector is NULL. */
         if ( n->arch.pv_vcpu.fs_base )
@@ -1224,7 +1224,7 @@ static void load_segments(struct vcpu *n
             (unsigned long *)pv->kernel_sp;
         unsigned long cs_and_mask, rflags;
 
-        if ( is_pv_32on64_domain(n->domain) )
+        if ( is_pv_32bit_vcpu(n) )
         {
             unsigned int *esp = ring_1(regs) ?
                                 (unsigned int *)regs->rsp :
@@ -1340,7 +1340,7 @@ static void save_segments(struct vcpu *v
     if ( regs->es )
         dirty_segment_mask |= DIRTY_ES;
 
-    if ( regs->fs || is_pv_32on64_domain(v->domain) )
+    if ( regs->fs || is_pv_32bit_vcpu(v) )
     {
         dirty_segment_mask |= DIRTY_FS;
         v->arch.pv_vcpu.fs_base = 0; /* != 0 selector kills fs_base */
@@ -1350,7 +1350,7 @@ static void save_segments(struct vcpu *v
         dirty_segment_mask |= DIRTY_FS_BASE;
     }
 
-    if ( regs->gs || is_pv_32on64_domain(v->domain) )
+    if ( regs->gs || is_pv_32bit_vcpu(v) )
     {
         dirty_segment_mask |= DIRTY_GS;
         v->arch.pv_vcpu.gs_base_user = 0; /* != 0 selector kills gs_base_user */
@@ -1483,8 +1483,8 @@ static void __context_switch(void)
 
     psr_ctxt_switch_to(nd);
 
-    gdt = !is_pv_32on64_domain(nd) ? per_cpu(gdt_table, cpu) :
-                                     per_cpu(compat_gdt_table, cpu);
+    gdt = !is_pv_32bit_domain(nd) ? per_cpu(gdt_table, cpu) :
+                                    per_cpu(compat_gdt_table, cpu);
     if ( need_full_gdt(nd) )
     {
         unsigned long mfn = virt_to_mfn(gdt);
@@ -1568,7 +1568,7 @@ void context_switch(struct vcpu *prev, s
         if ( is_pv_domain(nextd) &&
              (is_idle_domain(prevd) ||
               has_hvm_container_domain(prevd) ||
-              is_pv_32on64_domain(prevd) != is_pv_32on64_domain(nextd)) )
+              is_pv_32bit_domain(prevd) != is_pv_32bit_domain(nextd)) )
         {
             uint64_t efer = read_efer();
             if ( !(efer & EFER_SCE) )
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -293,7 +293,7 @@ static unsigned long __init compute_dom0
     avail -= (d->max_vcpus - 1UL)
              << get_order_from_bytes(sizeof(struct vcpu));
     /* ...and compat_l4's, if needed. */
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         avail -= d->max_vcpus - 1;
 
     /* Reserve memory for iommu_dom0_init() (rough estimate). */
@@ -608,7 +608,7 @@ static __init void dom0_update_physmap(s
         BUG_ON(rc);
         return;
     }
-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
         ((unsigned long *)vphysmap_s)[pfn] = mfn;
     else
         ((unsigned int *)vphysmap_s)[pfn] = mfn;
@@ -718,7 +718,7 @@ static __init void mark_pv_pt_pages_rdon
 
         /* Top-level p.t. is pinned. */
         if ( (page->u.inuse.type_info & PGT_type_mask) ==
-             (!is_pv_32on64_domain(d) ?
+             (!is_pv_32bit_domain(d) ?
               PGT_l4_page_table : PGT_l3_page_table) )
         {
             page->count_info        += 1;
@@ -1048,7 +1048,7 @@ int __init construct_dom0(
         vinitrd_end    = vinitrd_start + initrd_len;
         vphysmap_start = round_pgup(vinitrd_end);
     }
-    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32on64_domain(d) ?
+    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32bit_domain(d) ?
                                                      sizeof(unsigned long) :
                                                      sizeof(unsigned int)));
     if ( parms.p2m_base != UNSET_ADDR )
@@ -1076,9 +1076,9 @@ int __init construct_dom0(
 #define NR(_l,_h,_s) \
     (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
        ((_l) & ~((1UL<<(_s))-1))) >> (_s))
-        if ( (!is_pv_32on64_domain(d) + /* # L4 */
+        if ( (!is_pv_32bit_domain(d) + /* # L4 */
               NR(v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
-              (!is_pv_32on64_domain(d) ?
+              (!is_pv_32bit_domain(d) ?
                NR(v_start, v_end, L3_PAGETABLE_SHIFT) : /* # L2 */
                4) + /* # compat L2 */
               NR(v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
@@ -1176,7 +1176,7 @@ int __init construct_dom0(
         mpt_alloc -= PAGE_ALIGN(initrd_len);
 
     /* Overlap with Xen protected area? */
-    if ( !is_pv_32on64_domain(d) ?
+    if ( !is_pv_32bit_domain(d) ?
          ((v_start < HYPERVISOR_VIRT_END) &&
           (v_end > HYPERVISOR_VIRT_START)) :
          (v_end > HYPERVISOR_COMPAT_VIRT_START(d)) )
@@ -1186,14 +1186,14 @@ int __init construct_dom0(
         goto out;
     }
 
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
     {
         v->arch.pv_vcpu.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
         v->arch.pv_vcpu.event_callback_cs    = FLAT_COMPAT_KERNEL_CS;
     }
 
     /* WARNING: The new domain must have its 'processor' field filled in! */
-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
     {
         maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
         l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
@@ -1211,7 +1211,7 @@ int __init construct_dom0(
     clear_page(l4tab);
     init_guest_l4_table(l4tab, d, 0);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         v->arch.guest_table_user = v->arch.guest_table;
 
     l4tab += l4_table_offset(v_start);
@@ -1257,7 +1257,7 @@ int __init construct_dom0(
             mfn = pfn++;
         else
             mfn = initrd_mfn++;
-        *l1tab = l1e_from_pfn(mfn, (!is_pv_32on64_domain(d) ?
+        *l1tab = l1e_from_pfn(mfn, (!is_pv_32bit_domain(d) ?
                                     L1_PROT : COMPAT_L1_PROT));
         l1tab++;
 
@@ -1270,7 +1270,7 @@ int __init construct_dom0(
         }
     }
 
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
     {
         /* Ensure the first four L3 entries are all populated. */
         for ( i = 0, l3tab = l3start; i < 4; ++i, ++l3tab )
@@ -1477,7 +1477,7 @@ int __init construct_dom0(
     if ( is_pvh_domain(d) )
         si->shared_info = shared_info_paddr;
 
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         xlat_start_info(si, XLAT_start_info_console_dom0);
 
     /* Return to idle domain's page tables. */
@@ -1499,10 +1499,10 @@ int __init construct_dom0(
      */
     regs = &v->arch.user_regs;
     regs->ds = regs->es = regs->fs = regs->gs =
-        !is_pv_32on64_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
-    regs->ss = (!is_pv_32on64_domain(d) ?
+        !is_pv_32bit_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
+    regs->ss = (!is_pv_32bit_domain(d) ?
                 FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
-    regs->cs = (!is_pv_32on64_domain(d) ?
+    regs->cs = (!is_pv_32bit_domain(d) ?
                 FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
     regs->eip = parms.virt_entry;
     regs->esp = vstack_end;
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -349,7 +349,7 @@ long arch_do_domctl(
 
     case XEN_DOMCTL_get_address_size:
         domctl->u.address_size.size =
-            is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
+            is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG;
         copyback = 1;
         break;
 
@@ -1183,7 +1183,7 @@ void arch_get_info_guest(struct vcpu *v,
 {
     unsigned int i;
     const struct domain *d = v->domain;
-    bool_t compat = is_pv_32on64_domain(d);
+    bool_t compat = is_pv_32bit_domain(d);
 #define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
 
     if ( !is_pv_domain(d) )
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -163,9 +163,8 @@ static uint32_t base_disallow_mask;
 #define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
 #define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)
 
-#define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?  \
-                             base_disallow_mask :       \
-                             0xFFFFF198U)
+#define l3_disallow_mask(d) (!is_pv_32bit_domain(d) ? \
+                             base_disallow_mask : 0xFFFFF198U)
 
 #define L4_DISALLOW_MASK (base_disallow_mask)
 
@@ -985,7 +984,7 @@ get_page_from_l4e(
 #define adjust_guest_l1e(pl1e, d)                                            \
     do {                                                                     \
         if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&                \
-             likely(!is_pv_32on64_domain(d)) )                               \
+             likely(!is_pv_32bit_domain(d)) )                                \
         {                                                                    \
             /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */    \
             if ( (l1e_get_flags((pl1e)) & (_PAGE_GUEST_KERNEL|_PAGE_GLOBAL)) \
@@ -1002,14 +1001,14 @@ get_page_from_l4e(
 #define adjust_guest_l2e(pl2e, d)                               \
     do {                                                        \
         if ( likely(l2e_get_flags((pl2e)) & _PAGE_PRESENT) &&   \
-             likely(!is_pv_32on64_domain(d)) )                  \
+             likely(!is_pv_32bit_domain(d)) )                   \
             l2e_add_flags((pl2e), _PAGE_USER);                  \
     } while ( 0 )
 
 #define adjust_guest_l3e(pl3e, d)                                   \
     do {                                                            \
         if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )        \
-            l3e_add_flags((pl3e), likely(!is_pv_32on64_domain(d)) ? \
+            l3e_add_flags((pl3e), likely(!is_pv_32bit_domain(d)) ?  \
                                          _PAGE_USER :               \
                                          _PAGE_USER|_PAGE_RW);      \
     } while ( 0 )
@@ -1017,13 +1016,13 @@ get_page_from_l4e(
 #define adjust_guest_l4e(pl4e, d)                               \
     do {                                                        \
         if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) &&   \
-             likely(!is_pv_32on64_domain(d)) )                  \
+             likely(!is_pv_32bit_domain(d)) )                   \
             l4e_add_flags((pl4e), _PAGE_USER);                  \
     } while ( 0 )
 
 #define unadjust_guest_l3e(pl3e, d)                                         \
     do {                                                                    \
-        if ( unlikely(is_pv_32on64_domain(d)) &&                            \
+        if ( unlikely(is_pv_32bit_domain(d)) &&                             \
              likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )                \
             l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);   \
     } while ( 0 )
@@ -1314,7 +1313,7 @@ static int alloc_l3_table(struct page_in
      * 512 entries must be valid/verified, which is most easily achieved
      * by clearing them out.
      */
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         memset(pl3e + 4, 0, (L3_PAGETABLE_ENTRIES - 4) * sizeof(*pl3e));
 
     for ( i = page->nr_validated_ptes; i < L3_PAGETABLE_ENTRIES;
@@ -1391,7 +1390,7 @@ void init_guest_l4_table(l4_pgentry_t l4
         l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
-    if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
+    if ( zap_ro_mpt || is_pv_32bit_domain(d) || paging_mode_refcounts(d) )
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
 }
 
@@ -2707,7 +2706,7 @@ int new_guest_cr3(unsigned long mfn)
     int rc;
     unsigned long old_base_mfn;
 
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
     {
         unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
         l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
@@ -2856,7 +2855,7 @@ static inline int vcpumask_to_pcpumask(
     unsigned int vcpu_id, vcpu_bias, offs;
     unsigned long vmask;
     struct vcpu *v;
-    bool_t is_native = !is_pv_32on64_domain(d);
+    bool_t is_native = !is_pv_32bit_domain(d);
 
     cpumask_clear(pmask);
     for ( vmask = 0, offs = 0; ; ++offs)
@@ -5165,7 +5164,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
     ptwr_ctxt.ctxt.regs = regs;
     ptwr_ctxt.ctxt.force_writeback = 0;
     ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
-        is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
+        is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG;
     ptwr_ctxt.ctxt.swint_emulate = x86_swint_emulate_none;
     ptwr_ctxt.cr2 = addr;
     ptwr_ctxt.pte = pte;
@@ -5235,10 +5234,9 @@ static const struct x86_emulate_ops mmio
 int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
                           struct cpu_user_regs *regs)
 {
-    l1_pgentry_t      pte;
-    unsigned long     mfn;
-    unsigned int      addr_size = is_pv_32on64_domain(v->domain) ?
-                                  32 : BITS_PER_LONG;
+    l1_pgentry_t pte;
+    unsigned long mfn;
+    unsigned int addr_size = is_pv_32bit_vcpu(v) ? 32 : BITS_PER_LONG;
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = {
         .ctxt.regs = regs,
         .ctxt.addr_size = addr_size,
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2110,7 +2110,7 @@ void sh_destroy_shadow(struct domain *d,
            t == SH_type_fl1_pae_shadow ||
            t == SH_type_fl1_64_shadow  ||
            t == SH_type_monitor_table  ||
-           (is_pv_32on64_domain(d) && t == SH_type_l4_64_shadow) ||
+           (is_pv_32bit_domain(d) && t == SH_type_l4_64_shadow) ||
            (page_get_owner(mfn_to_page(backpointer(sp))) == d));
 
     /* The down-shifts here are so that the switch statement is on nice
@@ -2139,7 +2139,7 @@ void sh_destroy_shadow(struct domain *d,
         SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(d, smfn);
         break;
     case SH_type_l2h_64_shadow:
-        ASSERT(is_pv_32on64_domain(d));
+        ASSERT(is_pv_32bit_domain(d));
         /* Fall through... */
     case SH_type_l2_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(d, smfn);
@@ -3472,7 +3472,7 @@ static int sh_enable_log_dirty(struct do
     /* 32bit PV guests on 64bit xen behave like older 64bit linux: they
      * change an l4e instead of cr3 to switch tables.  Give them the
      * same optimization */
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
 #endif
 
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -131,8 +131,8 @@ set_shadow_status(struct domain *d, mfn_
 
     ASSERT(mfn_to_page(smfn)->u.sh.head);
 
-    /* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
-    if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
+    /* 32-bit PV guests don't own their l4 pages so can't get_page them */
+    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
     {
         res = get_page(mfn_to_page(gmfn), d);
         ASSERT(res == 1);
@@ -159,8 +159,8 @@ delete_shadow_status(struct domain *d, m
                   d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
     ASSERT(mfn_to_page(smfn)->u.sh.head);
     shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn);
-    /* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
-    if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
+    /* 32-bit PV guests don't own their l4 pages; see set_shadow_status */
+    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
         put_page(mfn_to_page(gmfn));
 }
 
@@ -698,7 +698,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32bit_domain(d)
          && is_pv_domain(d) )
     {
         sflags |= _PAGE_USER;
@@ -1346,8 +1346,8 @@ do {                                    
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
     {                                                                       \
         if ( (!(_xen))                                                      \
-             || !is_pv_32on64_domain(_dom)                                  \
-             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
+             || !is_pv_32bit_domain(_dom)                                   \
+             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow    \
              || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
         {                                                                   \
             (_sl2e) = _sp + _i;                                             \
@@ -1435,7 +1435,7 @@ void sh_install_xen_entries_in_l4(struct
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                             __PAGE_HYPERVISOR);
 
-    if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) &&
+    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
          !VM_ASSIST(d, m2p_strict) )
     {
         /* open coded zap_ro_mpt(mfn_x(sl4mfn)): */
@@ -1475,7 +1475,7 @@ static void sh_install_xen_entries_in_l2
 {
     shadow_l2e_t *sl2e;
 
-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
         return;
 
     sl2e = sh_map_domain_page(sl2hmfn);
@@ -1620,9 +1620,9 @@ sh_make_monitor_table(struct vcpu *v)
             l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR);
             sh_unmap_domain_page(l3e);
 
-            if ( is_pv_32on64_domain(d) )
+            if ( is_pv_32bit_domain(d) )
             {
-                /* For 32-on-64 PV guests, we need to map the 32-bit Xen
+                /* For 32-bit PV guests, we need to map the 32-bit Xen
                  * area into its usual VAs in the monitor tables */
                 m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m3mfn)->shadow_flags = 3;
@@ -1740,7 +1740,7 @@ static shadow_l2e_t * shadow_get_and_cre
         unsigned int t = SH_type_l2_shadow;
 
         /* Tag compat L2 containing hypervisor (m2p) mappings */
-        if ( is_pv_32on64_domain(v->domain) &&
+        if ( is_pv_32bit_vcpu(v) &&
              guest_l4_table_offset(gw->va) == 0 &&
              guest_l3_table_offset(gw->va) == 3 )
             t = SH_type_l2h_shadow;
@@ -2043,7 +2043,7 @@ void sh_destroy_monitor_table(struct vcp
         sh_unmap_domain_page(l3e);
         shadow_free(d, m3mfn);
 
-        if ( is_pv_32on64_domain(d) )
+        if ( is_pv_32bit_domain(d) )
         {
             /* Need to destroy the l3 and l2 monitor pages that map the
              * Xen VAs at 3GB-4GB */
@@ -3963,7 +3963,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
                    (unsigned long)pagetable_get_pfn(v->arch.guest_table));
 
 #if GUEST_PAGING_LEVELS == 4
-    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_domain(d) )
+    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32bit_domain(d) )
         gmfn = pagetable_get_mfn(v->arch.guest_table_user);
     else
 #endif
@@ -4078,7 +4078,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
     if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
         flush_tlb_mask(d->domain_dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
-    if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) )
+    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
     {
         mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
 
@@ -5104,7 +5104,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
             gmfn = get_shadow_status(d, get_gfn_query_unlocked(
                                         d, gfn_x(gfn), &p2mt),
                                      ((GUEST_PAGING_LEVELS == 3 ||
-                                       is_pv_32on64_domain(d))
+                                       is_pv_32bit_domain(d))
                                       && !shadow_mode_external(d)
                                       && (guest_index(gl3e) % 4) == 3)
                                      ? SH_type_l2h_shadow
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -951,7 +951,7 @@ void pv_cpuid(struct cpu_user_regs *regs
             __clear_bit(X86_FEATURE_LM % 32, &d);
             __clear_bit(X86_FEATURE_LAHF_LM % 32, &c);
         }
-        if ( is_pv_32on64_domain(currd) &&
+        if ( is_pv_32bit_domain(currd) &&
              boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
             __clear_bit(X86_FEATURE_SYSCALL % 32, &d);
         __clear_bit(X86_FEATURE_PAGE1GB % 32, &d);
@@ -3675,7 +3675,7 @@ long register_guest_nmi_callback(unsigne
 
     t->vector  = TRAP_nmi;
     t->flags   = 0;
-    t->cs      = (is_pv_32on64_domain(d) ?
+    t->cs      = (is_pv_32bit_domain(d) ?
                   FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS);
     t->address = address;
     TI_SET_IF(t, 1);
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1188,7 +1188,7 @@ int handle_memadd_fault(unsigned long ad
     unsigned long mfn, idle_index;
     int ret = 0;
 
-    if (!is_pv_32on64_domain(d))
+    if (!is_pv_32bit_domain(d))
         return 0;
 
     if ( (addr < HYPERVISOR_COMPAT_VIRT_START(d)) ||
@@ -1247,7 +1247,7 @@ unmap:
 
 void domain_set_alloc_bitsize(struct domain *d)
 {
-    if ( !is_pv_32on64_domain(d) ||
+    if ( !is_pv_32bit_domain(d) ||
          (MACH2PHYS_COMPAT_NR_ENTRIES(d) >= max_page) ||
          d->arch.physaddr_bitsize > 0 )
         return;
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -495,7 +495,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
             break;
 
 #ifdef CONFIG_COMPAT
-        if ( !is_pv_32on64_domain(d) )
+        if ( !is_pv_32bit_domain(d) )
             ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
         else
             ret = copy_from_guest(c.cmp,
@@ -901,7 +901,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         vcpu_unpause(v);
 
 #ifdef CONFIG_COMPAT
-        if ( !is_pv_32on64_domain(d) )
+        if ( !is_pv_32bit_domain(d) )
             ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
         else
             ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -872,7 +872,7 @@ static int kexec_load_slot(struct kexec_
 static uint16_t kexec_load_v1_arch(void)
 {
 #ifdef CONFIG_X86
-    return is_pv_32on64_domain(hardware_domain) ? EM_386 : EM_X86_64;
+    return is_pv_32bit_domain(hardware_domain) ? EM_386 : EM_X86_64;
 #else
     return EM_NONE;
 #endif
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -219,7 +219,7 @@ static int alloc_xenoprof_struct(
     bufsize = sizeof(struct xenoprof_buf);
     i = sizeof(struct event_log);
 #ifdef CONFIG_COMPAT
-    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? hardware_domain : d);
+    d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d);
     if ( XENOPROF_COMPAT(d->xenoprof) )
     {
         bufsize = sizeof(struct compat_oprof_buf);
--- a/xen/include/asm-x86/desc.h
+++ b/xen/include/asm-x86/desc.h
@@ -65,7 +65,7 @@
  */
 #define guest_gate_selector_okay(d, sel)                                \
     ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
-     ((sel) == (!is_pv_32on64_domain(d) ?                               \
+     ((sel) == (!is_pv_32bit_domain(d) ?                                \
                 FLAT_KERNEL_CS :                /* Xen default seg? */  \
                 FLAT_COMPAT_KERNEL_CS)) ||                              \
      ((sel) & 4))                               /* LDT seg? */
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -14,7 +14,6 @@
 #define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
 #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
 #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
-#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
 
 #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
         d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)



+    if ( is_pv_32bit_domain(d) )
         memset(pl3e + 4, 0, (L3_PAGETABLE_ENTRIES - 4) * sizeof(*pl3e));
 
     for ( i = page->nr_validated_ptes; i < L3_PAGETABLE_ENTRIES;
@@ -1391,7 +1390,7 @@ void init_guest_l4_table(l4_pgentry_t l4
         l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
-    if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
+    if ( zap_ro_mpt || is_pv_32bit_domain(d) || paging_mode_refcounts(d) )
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
 }
 
@@ -2707,7 +2706,7 @@ int new_guest_cr3(unsigned long mfn)
     int rc;
     unsigned long old_base_mfn;
 
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
     {
         unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
         l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
@@ -2856,7 +2855,7 @@ static inline int vcpumask_to_pcpumask(
     unsigned int vcpu_id, vcpu_bias, offs;
     unsigned long vmask;
     struct vcpu *v;
-    bool_t is_native = !is_pv_32on64_domain(d);
+    bool_t is_native = !is_pv_32bit_domain(d);
 
     cpumask_clear(pmask);
     for ( vmask = 0, offs = 0; ; ++offs)
@@ -5165,7 +5164,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
     ptwr_ctxt.ctxt.regs = regs;
     ptwr_ctxt.ctxt.force_writeback = 0;
     ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
-        is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
+        is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG;
     ptwr_ctxt.ctxt.swint_emulate = x86_swint_emulate_none;
     ptwr_ctxt.cr2 = addr;
     ptwr_ctxt.pte = pte;
@@ -5235,10 +5234,9 @@ static const struct x86_emulate_ops mmio
 int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
                           struct cpu_user_regs *regs)
 {
-    l1_pgentry_t      pte;
-    unsigned long     mfn;
-    unsigned int      addr_size = is_pv_32on64_domain(v->domain) ?
-                                  32 : BITS_PER_LONG;
+    l1_pgentry_t pte;
+    unsigned long mfn;
+    unsigned int addr_size = is_pv_32bit_vcpu(v) ? 32 : BITS_PER_LONG;
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = {
         .ctxt.regs = regs,
         .ctxt.addr_size = addr_size,
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2110,7 +2110,7 @@ void sh_destroy_shadow(struct domain *d,
            t == SH_type_fl1_pae_shadow ||
            t == SH_type_fl1_64_shadow  ||
            t == SH_type_monitor_table  ||
-           (is_pv_32on64_domain(d) && t == SH_type_l4_64_shadow) ||
+           (is_pv_32bit_domain(d) && t == SH_type_l4_64_shadow) ||
            (page_get_owner(mfn_to_page(backpointer(sp))) == d));
 
     /* The down-shifts here are so that the switch statement is on nice
@@ -2139,7 +2139,7 @@ void sh_destroy_shadow(struct domain *d,
         SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(d, smfn);
         break;
     case SH_type_l2h_64_shadow:
-        ASSERT(is_pv_32on64_domain(d));
+        ASSERT(is_pv_32bit_domain(d));
         /* Fall through... */
     case SH_type_l2_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(d, smfn);
@@ -3472,7 +3472,7 @@ static int sh_enable_log_dirty(struct do
     /* 32bit PV guests on 64bit xen behave like older 64bit linux: they
      * change an l4e instead of cr3 to switch tables.  Give them the
      * same optimization */
-    if ( is_pv_32on64_domain(d) )
+    if ( is_pv_32bit_domain(d) )
         d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
 #endif
 
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -131,8 +131,8 @@ set_shadow_status(struct domain *d, mfn_
 
     ASSERT(mfn_to_page(smfn)->u.sh.head);
 
-    /* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
-    if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
+    /* 32-bit PV guests don't own their l4 pages so can't get_page them */
+    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
     {
         res = get_page(mfn_to_page(gmfn), d);
         ASSERT(res == 1);
@@ -159,8 +159,8 @@ delete_shadow_status(struct domain *d, m
                   d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
     ASSERT(mfn_to_page(smfn)->u.sh.head);
     shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn);
-    /* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
-    if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
+    /* 32-bit PV guests don't own their l4 pages; see set_shadow_status */
+    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
         put_page(mfn_to_page(gmfn));
 }
 
@@ -698,7 +698,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32bit_domain(d)
          && is_pv_domain(d) )
     {
         sflags |= _PAGE_USER;
@@ -1346,8 +1346,8 @@ do {                                    
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
     {                                                                       \
         if ( (!(_xen))                                                      \
-             || !is_pv_32on64_domain(_dom)                                  \
-             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
+             || !is_pv_32bit_domain(_dom)                                   \
+             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow    \
              || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
         {                                                                   \
             (_sl2e) = _sp + _i;                                             \
@@ -1435,7 +1435,7 @@ void sh_install_xen_entries_in_l4(struct
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                             __PAGE_HYPERVISOR);
 
-    if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) &&
+    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
          !VM_ASSIST(d, m2p_strict) )
     {
         /* open coded zap_ro_mpt(mfn_x(sl4mfn)): */
@@ -1475,7 +1475,7 @@ static void sh_install_xen_entries_in_l2
 {
     shadow_l2e_t *sl2e;
 
-    if ( !is_pv_32on64_domain(d) )
+    if ( !is_pv_32bit_domain(d) )
         return;
 
     sl2e = sh_map_domain_page(sl2hmfn);
@@ -1620,9 +1620,9 @@ sh_make_monitor_table(struct vcpu *v)
             l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR);
             sh_unmap_domain_page(l3e);
 
-            if ( is_pv_32on64_domain(d) )
+            if ( is_pv_32bit_domain(d) )
             {
-                /* For 32-on-64 PV guests, we need to map the 32-bit Xen
+                /* For 32-bit PV guests, we need to map the 32-bit Xen
                  * area into its usual VAs in the monitor tables */
                 m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m3mfn)->shadow_flags = 3;
@@ -1740,7 +1740,7 @@ static shadow_l2e_t * shadow_get_and_cre
         unsigned int t = SH_type_l2_shadow;
 
         /* Tag compat L2 containing hypervisor (m2p) mappings */
-        if ( is_pv_32on64_domain(v->domain) &&
+        if ( is_pv_32bit_vcpu(v) &&
              guest_l4_table_offset(gw->va) == 0 &&
              guest_l3_table_offset(gw->va) == 3 )
             t = SH_type_l2h_shadow;
@@ -2043,7 +2043,7 @@ void sh_destroy_monitor_table(struct vcp
         sh_unmap_domain_page(l3e);
         shadow_free(d, m3mfn);
 
-        if ( is_pv_32on64_domain(d) )
+        if ( is_pv_32bit_domain(d) )
         {
             /* Need to destroy the l3 and l2 monitor pages that map the
              * Xen VAs at 3GB-4GB */
@@ -3963,7 +3963,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
                    (unsigned long)pagetable_get_pfn(v->arch.guest_table));
 
 #if GUEST_PAGING_LEVELS == 4
-    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_domain(d) )
+    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32bit_domain(d) )
         gmfn = pagetable_get_mfn(v->arch.guest_table_user);
     else
 #endif
@@ -4078,7 +4078,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
     if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
         flush_tlb_mask(d->domain_dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
-    if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) )
+    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
     {
         mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
 
@@ -5104,7 +5104,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
             gmfn = get_shadow_status(d, get_gfn_query_unlocked(
                                         d, gfn_x(gfn), &p2mt),
                                      ((GUEST_PAGING_LEVELS == 3 ||
-                                       is_pv_32on64_domain(d))
+                                       is_pv_32bit_domain(d))
                                       && !shadow_mode_external(d)
                                       && (guest_index(gl3e) % 4) == 3)
                                      ? SH_type_l2h_shadow
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -951,7 +951,7 @@ void pv_cpuid(struct cpu_user_regs *regs
             __clear_bit(X86_FEATURE_LM % 32, &d);
             __clear_bit(X86_FEATURE_LAHF_LM % 32, &c);
         }
-        if ( is_pv_32on64_domain(currd) &&
+        if ( is_pv_32bit_domain(currd) &&
              boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
             __clear_bit(X86_FEATURE_SYSCALL % 32, &d);
         __clear_bit(X86_FEATURE_PAGE1GB % 32, &d);
@@ -3675,7 +3675,7 @@ long register_guest_nmi_callback(unsigne
 
     t->vector  = TRAP_nmi;
     t->flags   = 0;
-    t->cs      = (is_pv_32on64_domain(d) ?
+    t->cs      = (is_pv_32bit_domain(d) ?
                   FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS);
     t->address = address;
     TI_SET_IF(t, 1);
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1188,7 +1188,7 @@ int handle_memadd_fault(unsigned long ad
     unsigned long mfn, idle_index;
     int ret = 0;
 
-    if (!is_pv_32on64_domain(d))
+    if (!is_pv_32bit_domain(d))
         return 0;
 
     if ( (addr < HYPERVISOR_COMPAT_VIRT_START(d)) ||
@@ -1247,7 +1247,7 @@ unmap:
 
 void domain_set_alloc_bitsize(struct domain *d)
 {
-    if ( !is_pv_32on64_domain(d) ||
+    if ( !is_pv_32bit_domain(d) ||
          (MACH2PHYS_COMPAT_NR_ENTRIES(d) >= max_page) ||
          d->arch.physaddr_bitsize > 0 )
         return;
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -495,7 +495,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
             break;
 
 #ifdef CONFIG_COMPAT
-        if ( !is_pv_32on64_domain(d) )
+        if ( !is_pv_32bit_domain(d) )
             ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
         else
             ret = copy_from_guest(c.cmp,
@@ -901,7 +901,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         vcpu_unpause(v);
 
 #ifdef CONFIG_COMPAT
-        if ( !is_pv_32on64_domain(d) )
+        if ( !is_pv_32bit_domain(d) )
             ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
         else
             ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -872,7 +872,7 @@ static int kexec_load_slot(struct kexec_
 static uint16_t kexec_load_v1_arch(void)
 {
 #ifdef CONFIG_X86
-    return is_pv_32on64_domain(hardware_domain) ? EM_386 : EM_X86_64;
+    return is_pv_32bit_domain(hardware_domain) ? EM_386 : EM_X86_64;
 #else
     return EM_NONE;
 #endif
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -219,7 +219,7 @@ static int alloc_xenoprof_struct(
     bufsize = sizeof(struct xenoprof_buf);
     i = sizeof(struct event_log);
 #ifdef CONFIG_COMPAT
-    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? hardware_domain : d);
+    d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d);
     if ( XENOPROF_COMPAT(d->xenoprof) )
     {
         bufsize = sizeof(struct compat_oprof_buf);
--- a/xen/include/asm-x86/desc.h
+++ b/xen/include/asm-x86/desc.h
@@ -65,7 +65,7 @@
  */
 #define guest_gate_selector_okay(d, sel)                                \
     ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
-     ((sel) == (!is_pv_32on64_domain(d) ?                               \
+     ((sel) == (!is_pv_32bit_domain(d) ?                                \
                 FLAT_KERNEL_CS :                /* Xen default seg? */  \
                 FLAT_COMPAT_KERNEL_CS)) ||                              \
      ((sel) & 4))                               /* LDT seg? */
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -14,7 +14,6 @@
 #define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
 #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
 #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
-#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
 
 #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
         d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH 3/3] x86/mm: use is_..._vcpu() instead of open coding it
  2015-06-23 15:14 [PATCH 0/3] x86: further is_..._...() adjustments Jan Beulich
  2015-06-23 15:18 ` [PATCH 1/3] x86: drop is_pv_32on64_vcpu() Jan Beulich
  2015-06-23 15:19 ` [PATCH 2/3] x86: drop is_pv_32on64_domain() Jan Beulich
@ 2015-06-23 15:20 ` Jan Beulich
  2015-06-23 15:45   ` Andrew Cooper
  2015-06-23 16:13   ` George Dunlap
  2 siblings, 2 replies; 14+ messages in thread
From: Jan Beulich @ 2015-06-23 15:20 UTC (permalink / raw)
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper, Keir Fraser, Tim Deegan

[-- Attachment #1: Type: text/plain, Size: 1301 bytes --]

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1851,9 +1851,7 @@ unsigned long paging_gva_to_gfn(struct v
     struct p2m_domain *hostp2m = p2m_get_hostp2m(v->domain);
     const struct paging_mode *hostmode = paging_get_hostmode(v);
 
-    if ( is_hvm_domain(v->domain)
-        && paging_mode_hap(v->domain) 
-        && nestedhvm_is_n2(v) )
+    if ( is_hvm_vcpu(v) && paging_mode_hap(v->domain) && nestedhvm_is_n2(v) )
     {
         unsigned long gfn;
         struct p2m_domain *p2m;
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -971,7 +971,7 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn
     if ( pg->shadow_flags &
          ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
          || sh_page_has_multiple_shadows(pg)
-         || is_pv_domain(v->domain)
+         || is_pv_vcpu(v)
          || !v->domain->arch.paging.shadow.oos_active )
         return 0;
 
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -73,6 +73,6 @@ static const struct paging_mode sh_pagin
 
 void shadow_vcpu_init(struct vcpu *v)
 {
-    ASSERT(is_pv_domain(v->domain));
+    ASSERT(is_pv_vcpu(v));
     v->arch.paging.mode = &sh_paging_none;
 }




[-- Attachment #2: x86-use-is_xyz_vcpu.patch --]
[-- Type: text/plain, Size: 1350 bytes --]

x86/mm: use is_..._vcpu() instead of open coding it

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1851,9 +1851,7 @@ unsigned long paging_gva_to_gfn(struct v
     struct p2m_domain *hostp2m = p2m_get_hostp2m(v->domain);
     const struct paging_mode *hostmode = paging_get_hostmode(v);
 
-    if ( is_hvm_domain(v->domain)
-        && paging_mode_hap(v->domain) 
-        && nestedhvm_is_n2(v) )
+    if ( is_hvm_vcpu(v) && paging_mode_hap(v->domain) && nestedhvm_is_n2(v) )
     {
         unsigned long gfn;
         struct p2m_domain *p2m;
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -971,7 +971,7 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn
     if ( pg->shadow_flags &
          ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
          || sh_page_has_multiple_shadows(pg)
-         || is_pv_domain(v->domain)
+         || is_pv_vcpu(v)
          || !v->domain->arch.paging.shadow.oos_active )
         return 0;
 
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -73,6 +73,6 @@ static const struct paging_mode sh_pagin
 
 void shadow_vcpu_init(struct vcpu *v)
 {
-    ASSERT(is_pv_domain(v->domain));
+    ASSERT(is_pv_vcpu(v));
     v->arch.paging.mode = &sh_paging_none;
 }

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/3] x86: drop is_pv_32on64_vcpu()
  2015-06-23 15:18 ` [PATCH 1/3] x86: drop is_pv_32on64_vcpu() Jan Beulich
@ 2015-06-23 15:39   ` Andrew Cooper
  2015-06-24 21:35   ` Boris Ostrovsky
  1 sibling, 0 replies; 14+ messages in thread
From: Andrew Cooper @ 2015-06-23 15:39 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Keir Fraser

On 23/06/15 16:18, Jan Beulich wrote:
> ... as being identical to is_pv_32bit_vcpu() after the x86-32 removal.
>
> In a few cases this includes an additional is_pv_32bit_vcpu() ->
> is_pv_32bit_domain() conversion.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 2/3] x86: drop is_pv_32on64_domain()
  2015-06-23 15:19 ` [PATCH 2/3] x86: drop is_pv_32on64_domain() Jan Beulich
@ 2015-06-23 15:43   ` Andrew Cooper
  2015-06-23 16:25   ` George Dunlap
  2015-07-06 16:01   ` Ian Campbell
  2 siblings, 0 replies; 14+ messages in thread
From: Andrew Cooper @ 2015-06-23 15:43 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: George Dunlap, Ian Jackson, Keir Fraser, Ian Campbell, Tim Deegan

On 23/06/15 16:19, Jan Beulich wrote:
> ... as being identical to is_pv_32bit_domain() after the x86-32
> removal.
>
> In a few cases this includes no longer open-coding is_pv_32bit_vcpu().
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 3/3] x86/mm: use is_..._vcpu() instead of open coding it
  2015-06-23 15:20 ` [PATCH 3/3] x86/mm: use is_..._vcpu() instead of open coding it Jan Beulich
@ 2015-06-23 15:45   ` Andrew Cooper
  2015-06-23 16:13   ` George Dunlap
  1 sibling, 0 replies; 14+ messages in thread
From: Andrew Cooper @ 2015-06-23 15:45 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: George Dunlap, Tim Deegan, Keir Fraser

On 23/06/15 16:20, Jan Beulich wrote:
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 3/3] x86/mm: use is_..._vcpu() instead of open coding it
  2015-06-23 15:20 ` [PATCH 3/3] x86/mm: use is_..._vcpu() instead of open coding it Jan Beulich
  2015-06-23 15:45   ` Andrew Cooper
@ 2015-06-23 16:13   ` George Dunlap
  1 sibling, 0 replies; 14+ messages in thread
From: George Dunlap @ 2015-06-23 16:13 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Andrew Cooper, Keir Fraser, Tim Deegan

On 06/23/2015 04:20 PM, Jan Beulich wrote:
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: George Dunlap <george.dunlap@eu.citrix.com>

> 
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1851,9 +1851,7 @@ unsigned long paging_gva_to_gfn(struct v
>      struct p2m_domain *hostp2m = p2m_get_hostp2m(v->domain);
>      const struct paging_mode *hostmode = paging_get_hostmode(v);
>  
> -    if ( is_hvm_domain(v->domain)
> -        && paging_mode_hap(v->domain) 
> -        && nestedhvm_is_n2(v) )
> +    if ( is_hvm_vcpu(v) && paging_mode_hap(v->domain) && nestedhvm_is_n2(v) )
>      {
>          unsigned long gfn;
>          struct p2m_domain *p2m;
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -971,7 +971,7 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn
>      if ( pg->shadow_flags &
>           ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
>           || sh_page_has_multiple_shadows(pg)
> -         || is_pv_domain(v->domain)
> +         || is_pv_vcpu(v)
>           || !v->domain->arch.paging.shadow.oos_active )
>          return 0;
>  
> --- a/xen/arch/x86/mm/shadow/none.c
> +++ b/xen/arch/x86/mm/shadow/none.c
> @@ -73,6 +73,6 @@ static const struct paging_mode sh_pagin
>  
>  void shadow_vcpu_init(struct vcpu *v)
>  {
> -    ASSERT(is_pv_domain(v->domain));
> +    ASSERT(is_pv_vcpu(v));
>      v->arch.paging.mode = &sh_paging_none;
>  }
> 
> 
> 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 2/3] x86: drop is_pv_32on64_domain()
  2015-06-23 15:19 ` [PATCH 2/3] x86: drop is_pv_32on64_domain() Jan Beulich
  2015-06-23 15:43   ` Andrew Cooper
@ 2015-06-23 16:25   ` George Dunlap
  2015-07-06 16:01   ` Ian Campbell
  2 siblings, 0 replies; 14+ messages in thread
From: George Dunlap @ 2015-06-23 16:25 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Ian Campbell, Andrew Cooper, Keir Fraser, Ian Jackson, Tim Deegan

On 06/23/2015 04:19 PM, Jan Beulich wrote:
> ... as being identical to is_pv_32bit_domain() after the x86-32
> removal.
> 
> In a few cases this includes no longer open-coding is_pv_32bit_vcpu().
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: George Dunlap <george.dunlap@eu.citrix.com>

> 
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -367,7 +367,7 @@ int switch_native(struct domain *d)
>  
>      if ( !may_switch_mode(d) )
>          return -EACCES;
> -    if ( !is_pv_32on64_domain(d) )
> +    if ( !is_pv_32bit_domain(d) )
>          return 0;
>  
>      d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
> @@ -392,7 +392,7 @@ int switch_compat(struct domain *d)
>  
>      if ( !may_switch_mode(d) )
>          return -EACCES;
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>          return 0;
>  
>      d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
> @@ -481,7 +481,7 @@ int vcpu_initialise(struct vcpu *v)
>  
>      v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
>  
> -    rc = is_pv_32on64_domain(d) ? setup_compat_l4(v) : 0;
> +    rc = is_pv_32bit_domain(d) ? setup_compat_l4(v) : 0;
>   done:
>      if ( rc )
>      {
> @@ -689,7 +689,7 @@ unsigned long pv_guest_cr4_fixup(const s
>      hv_cr4_mask = ~X86_CR4_TSD;
>      if ( cpu_has_de )
>          hv_cr4_mask &= ~X86_CR4_DE;
> -    if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) )
> +    if ( cpu_has_fsgsbase && !is_pv_32bit_vcpu(v) )
>          hv_cr4_mask &= ~X86_CR4_FSGSBASE;
>      if ( cpu_has_xsave )
>          hv_cr4_mask &= ~X86_CR4_OSXSAVE;
> @@ -721,7 +721,7 @@ int arch_set_info_guest(
>  
>      /* The context is a compat-mode one if the target domain is compat-mode;
>       * we expect the tools to DTRT even in compat-mode callers. */
> -    compat = is_pv_32on64_domain(d);
> +    compat = is_pv_32bit_domain(d);
>  
>  #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
>      flags = c(flags);
> @@ -1195,7 +1195,7 @@ static void load_segments(struct vcpu *n
>              all_segs_okay &= loadsegment(gs, uregs->gs);
>      }
>  
> -    if ( !is_pv_32on64_domain(n->domain) )
> +    if ( !is_pv_32bit_vcpu(n) )
>      {
>          /* This can only be non-zero if selector is NULL. */
>          if ( n->arch.pv_vcpu.fs_base )
> @@ -1224,7 +1224,7 @@ static void load_segments(struct vcpu *n
>              (unsigned long *)pv->kernel_sp;
>          unsigned long cs_and_mask, rflags;
>  
> -        if ( is_pv_32on64_domain(n->domain) )
> +        if ( is_pv_32bit_vcpu(n) )
>          {
>              unsigned int *esp = ring_1(regs) ?
>                                  (unsigned int *)regs->rsp :
> @@ -1340,7 +1340,7 @@ static void save_segments(struct vcpu *v
>      if ( regs->es )
>          dirty_segment_mask |= DIRTY_ES;
>  
> -    if ( regs->fs || is_pv_32on64_domain(v->domain) )
> +    if ( regs->fs || is_pv_32bit_vcpu(v) )
>      {
>          dirty_segment_mask |= DIRTY_FS;
>          v->arch.pv_vcpu.fs_base = 0; /* != 0 selector kills fs_base */
> @@ -1350,7 +1350,7 @@ static void save_segments(struct vcpu *v
>          dirty_segment_mask |= DIRTY_FS_BASE;
>      }
>  
> -    if ( regs->gs || is_pv_32on64_domain(v->domain) )
> +    if ( regs->gs || is_pv_32bit_vcpu(v) )
>      {
>          dirty_segment_mask |= DIRTY_GS;
>          v->arch.pv_vcpu.gs_base_user = 0; /* != 0 selector kills gs_base_user */
> @@ -1483,8 +1483,8 @@ static void __context_switch(void)
>  
>      psr_ctxt_switch_to(nd);
>  
> -    gdt = !is_pv_32on64_domain(nd) ? per_cpu(gdt_table, cpu) :
> -                                     per_cpu(compat_gdt_table, cpu);
> +    gdt = !is_pv_32bit_domain(nd) ? per_cpu(gdt_table, cpu) :
> +                                    per_cpu(compat_gdt_table, cpu);
>      if ( need_full_gdt(nd) )
>      {
>          unsigned long mfn = virt_to_mfn(gdt);
> @@ -1568,7 +1568,7 @@ void context_switch(struct vcpu *prev, s
>          if ( is_pv_domain(nextd) &&
>               (is_idle_domain(prevd) ||
>                has_hvm_container_domain(prevd) ||
> -              is_pv_32on64_domain(prevd) != is_pv_32on64_domain(nextd)) )
> +              is_pv_32bit_domain(prevd) != is_pv_32bit_domain(nextd)) )
>          {
>              uint64_t efer = read_efer();
>              if ( !(efer & EFER_SCE) )
> --- a/xen/arch/x86/domain_build.c
> +++ b/xen/arch/x86/domain_build.c
> @@ -293,7 +293,7 @@ static unsigned long __init compute_dom0
>      avail -= (d->max_vcpus - 1UL)
>               << get_order_from_bytes(sizeof(struct vcpu));
>      /* ...and compat_l4's, if needed. */
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>          avail -= d->max_vcpus - 1;
>  
>      /* Reserve memory for iommu_dom0_init() (rough estimate). */
> @@ -608,7 +608,7 @@ static __init void dom0_update_physmap(s
>          BUG_ON(rc);
>          return;
>      }
> -    if ( !is_pv_32on64_domain(d) )
> +    if ( !is_pv_32bit_domain(d) )
>          ((unsigned long *)vphysmap_s)[pfn] = mfn;
>      else
>          ((unsigned int *)vphysmap_s)[pfn] = mfn;
> @@ -718,7 +718,7 @@ static __init void mark_pv_pt_pages_rdon
>  
>          /* Top-level p.t. is pinned. */
>          if ( (page->u.inuse.type_info & PGT_type_mask) ==
> -             (!is_pv_32on64_domain(d) ?
> +             (!is_pv_32bit_domain(d) ?
>                PGT_l4_page_table : PGT_l3_page_table) )
>          {
>              page->count_info        += 1;
> @@ -1048,7 +1048,7 @@ int __init construct_dom0(
>          vinitrd_end    = vinitrd_start + initrd_len;
>          vphysmap_start = round_pgup(vinitrd_end);
>      }
> -    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32on64_domain(d) ?
> +    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32bit_domain(d) ?
>                                                       sizeof(unsigned long) :
>                                                       sizeof(unsigned int)));
>      if ( parms.p2m_base != UNSET_ADDR )
> @@ -1076,9 +1076,9 @@ int __init construct_dom0(
>  #define NR(_l,_h,_s) \
>      (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
>         ((_l) & ~((1UL<<(_s))-1))) >> (_s))
> -        if ( (!is_pv_32on64_domain(d) + /* # L4 */
> +        if ( (!is_pv_32bit_domain(d) + /* # L4 */
>                NR(v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
> -              (!is_pv_32on64_domain(d) ?
> +              (!is_pv_32bit_domain(d) ?
>                 NR(v_start, v_end, L3_PAGETABLE_SHIFT) : /* # L2 */
>                 4) + /* # compat L2 */
>                NR(v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
> @@ -1176,7 +1176,7 @@ int __init construct_dom0(
>          mpt_alloc -= PAGE_ALIGN(initrd_len);
>  
>      /* Overlap with Xen protected area? */
> -    if ( !is_pv_32on64_domain(d) ?
> +    if ( !is_pv_32bit_domain(d) ?
>           ((v_start < HYPERVISOR_VIRT_END) &&
>            (v_end > HYPERVISOR_VIRT_START)) :
>           (v_end > HYPERVISOR_COMPAT_VIRT_START(d)) )
> @@ -1186,14 +1186,14 @@ int __init construct_dom0(
>          goto out;
>      }
>  
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>      {
>          v->arch.pv_vcpu.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
>          v->arch.pv_vcpu.event_callback_cs    = FLAT_COMPAT_KERNEL_CS;
>      }
>  
>      /* WARNING: The new domain must have its 'processor' field filled in! */
> -    if ( !is_pv_32on64_domain(d) )
> +    if ( !is_pv_32bit_domain(d) )
>      {
>          maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
>          l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
> @@ -1211,7 +1211,7 @@ int __init construct_dom0(
>      clear_page(l4tab);
>      init_guest_l4_table(l4tab, d, 0);
>      v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>          v->arch.guest_table_user = v->arch.guest_table;
>  
>      l4tab += l4_table_offset(v_start);
> @@ -1257,7 +1257,7 @@ int __init construct_dom0(
>              mfn = pfn++;
>          else
>              mfn = initrd_mfn++;
> -        *l1tab = l1e_from_pfn(mfn, (!is_pv_32on64_domain(d) ?
> +        *l1tab = l1e_from_pfn(mfn, (!is_pv_32bit_domain(d) ?
>                                      L1_PROT : COMPAT_L1_PROT));
>          l1tab++;
>  
> @@ -1270,7 +1270,7 @@ int __init construct_dom0(
>          }
>      }
>  
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>      {
>          /* Ensure the first four L3 entries are all populated. */
>          for ( i = 0, l3tab = l3start; i < 4; ++i, ++l3tab )
> @@ -1477,7 +1477,7 @@ int __init construct_dom0(
>      if ( is_pvh_domain(d) )
>          si->shared_info = shared_info_paddr;
>  
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>          xlat_start_info(si, XLAT_start_info_console_dom0);
>  
>      /* Return to idle domain's page tables. */
> @@ -1499,10 +1499,10 @@ int __init construct_dom0(
>       */
>      regs = &v->arch.user_regs;
>      regs->ds = regs->es = regs->fs = regs->gs =
> -        !is_pv_32on64_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
> -    regs->ss = (!is_pv_32on64_domain(d) ?
> +        !is_pv_32bit_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
> +    regs->ss = (!is_pv_32bit_domain(d) ?
>                  FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
> -    regs->cs = (!is_pv_32on64_domain(d) ?
> +    regs->cs = (!is_pv_32bit_domain(d) ?
>                  FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
>      regs->eip = parms.virt_entry;
>      regs->esp = vstack_end;
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -349,7 +349,7 @@ long arch_do_domctl(
>  
>      case XEN_DOMCTL_get_address_size:
>          domctl->u.address_size.size =
> -            is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
> +            is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG;
>          copyback = 1;
>          break;
>  
> @@ -1183,7 +1183,7 @@ void arch_get_info_guest(struct vcpu *v,
>  {
>      unsigned int i;
>      const struct domain *d = v->domain;
> -    bool_t compat = is_pv_32on64_domain(d);
> +    bool_t compat = is_pv_32bit_domain(d);
>  #define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
>  
>      if ( !is_pv_domain(d) )
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -163,9 +163,8 @@ static uint32_t base_disallow_mask;
>  #define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
>  #define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)
>  
> -#define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?  \
> -                             base_disallow_mask :       \
> -                             0xFFFFF198U)
> +#define l3_disallow_mask(d) (!is_pv_32bit_domain(d) ? \
> +                             base_disallow_mask : 0xFFFFF198U)
>  
>  #define L4_DISALLOW_MASK (base_disallow_mask)
>  
> @@ -985,7 +984,7 @@ get_page_from_l4e(
>  #define adjust_guest_l1e(pl1e, d)                                            \
>      do {                                                                     \
>          if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&                \
> -             likely(!is_pv_32on64_domain(d)) )                               \
> +             likely(!is_pv_32bit_domain(d)) )                                \
>          {                                                                    \
>              /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */    \
>              if ( (l1e_get_flags((pl1e)) & (_PAGE_GUEST_KERNEL|_PAGE_GLOBAL)) \
> @@ -1002,14 +1001,14 @@ get_page_from_l4e(
>  #define adjust_guest_l2e(pl2e, d)                               \
>      do {                                                        \
>          if ( likely(l2e_get_flags((pl2e)) & _PAGE_PRESENT) &&   \
> -             likely(!is_pv_32on64_domain(d)) )                  \
> +             likely(!is_pv_32bit_domain(d)) )                   \
>              l2e_add_flags((pl2e), _PAGE_USER);                  \
>      } while ( 0 )
>  
>  #define adjust_guest_l3e(pl3e, d)                                   \
>      do {                                                            \
>          if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )        \
> -            l3e_add_flags((pl3e), likely(!is_pv_32on64_domain(d)) ? \
> +            l3e_add_flags((pl3e), likely(!is_pv_32bit_domain(d)) ?  \
>                                           _PAGE_USER :               \
>                                           _PAGE_USER|_PAGE_RW);      \
>      } while ( 0 )
> @@ -1017,13 +1016,13 @@ get_page_from_l4e(
>  #define adjust_guest_l4e(pl4e, d)                               \
>      do {                                                        \
>          if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) &&   \
> -             likely(!is_pv_32on64_domain(d)) )                  \
> +             likely(!is_pv_32bit_domain(d)) )                   \
>              l4e_add_flags((pl4e), _PAGE_USER);                  \
>      } while ( 0 )
>  
>  #define unadjust_guest_l3e(pl3e, d)                                         \
>      do {                                                                    \
> -        if ( unlikely(is_pv_32on64_domain(d)) &&                            \
> +        if ( unlikely(is_pv_32bit_domain(d)) &&                             \
>               likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )                \
>              l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);   \
>      } while ( 0 )
> @@ -1314,7 +1313,7 @@ static int alloc_l3_table(struct page_in
>       * 512 entries must be valid/verified, which is most easily achieved
>       * by clearing them out.
>       */
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>          memset(pl3e + 4, 0, (L3_PAGETABLE_ENTRIES - 4) * sizeof(*pl3e));
>  
>      for ( i = page->nr_validated_ptes; i < L3_PAGETABLE_ENTRIES;
> @@ -1391,7 +1390,7 @@ void init_guest_l4_table(l4_pgentry_t l4
>          l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
>      l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
>          l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
> -    if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
> +    if ( zap_ro_mpt || is_pv_32bit_domain(d) || paging_mode_refcounts(d) )
>          l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
>  }
>  
> @@ -2707,7 +2706,7 @@ int new_guest_cr3(unsigned long mfn)
>      int rc;
>      unsigned long old_base_mfn;
>  
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>      {
>          unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
>          l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
> @@ -2856,7 +2855,7 @@ static inline int vcpumask_to_pcpumask(
>      unsigned int vcpu_id, vcpu_bias, offs;
>      unsigned long vmask;
>      struct vcpu *v;
> -    bool_t is_native = !is_pv_32on64_domain(d);
> +    bool_t is_native = !is_pv_32bit_domain(d);
>  
>      cpumask_clear(pmask);
>      for ( vmask = 0, offs = 0; ; ++offs)
> @@ -5165,7 +5164,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
>      ptwr_ctxt.ctxt.regs = regs;
>      ptwr_ctxt.ctxt.force_writeback = 0;
>      ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
> -        is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
> +        is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG;
>      ptwr_ctxt.ctxt.swint_emulate = x86_swint_emulate_none;
>      ptwr_ctxt.cr2 = addr;
>      ptwr_ctxt.pte = pte;
> @@ -5235,10 +5234,9 @@ static const struct x86_emulate_ops mmio
>  int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
>                            struct cpu_user_regs *regs)
>  {
> -    l1_pgentry_t      pte;
> -    unsigned long     mfn;
> -    unsigned int      addr_size = is_pv_32on64_domain(v->domain) ?
> -                                  32 : BITS_PER_LONG;
> +    l1_pgentry_t pte;
> +    unsigned long mfn;
> +    unsigned int addr_size = is_pv_32bit_vcpu(v) ? 32 : BITS_PER_LONG;
>      struct mmio_ro_emulate_ctxt mmio_ro_ctxt = {
>          .ctxt.regs = regs,
>          .ctxt.addr_size = addr_size,
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -2110,7 +2110,7 @@ void sh_destroy_shadow(struct domain *d,
>             t == SH_type_fl1_pae_shadow ||
>             t == SH_type_fl1_64_shadow  ||
>             t == SH_type_monitor_table  ||
> -           (is_pv_32on64_domain(d) && t == SH_type_l4_64_shadow) ||
> +           (is_pv_32bit_domain(d) && t == SH_type_l4_64_shadow) ||
>             (page_get_owner(mfn_to_page(backpointer(sp))) == d));
>  
>      /* The down-shifts here are so that the switch statement is on nice
> @@ -2139,7 +2139,7 @@ void sh_destroy_shadow(struct domain *d,
>          SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(d, smfn);
>          break;
>      case SH_type_l2h_64_shadow:
> -        ASSERT(is_pv_32on64_domain(d));
> +        ASSERT(is_pv_32bit_domain(d));
>          /* Fall through... */
>      case SH_type_l2_64_shadow:
>          SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4)(d, smfn);
> @@ -3472,7 +3472,7 @@ static int sh_enable_log_dirty(struct do
>      /* 32bit PV guests on 64bit xen behave like older 64bit linux: they
>       * change an l4e instead of cr3 to switch tables.  Give them the
>       * same optimization */
> -    if ( is_pv_32on64_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>          d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
>  #endif
>  
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -131,8 +131,8 @@ set_shadow_status(struct domain *d, mfn_
>  
>      ASSERT(mfn_to_page(smfn)->u.sh.head);
>  
> -    /* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
> -    if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
> +    /* 32-bit PV guests don't own their l4 pages so can't get_page them */
> +    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
>      {
>          res = get_page(mfn_to_page(gmfn), d);
>          ASSERT(res == 1);
> @@ -159,8 +159,8 @@ delete_shadow_status(struct domain *d, m
>                    d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
>      ASSERT(mfn_to_page(smfn)->u.sh.head);
>      shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn);
> -    /* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
> -    if ( !is_pv_32on64_domain(d) || shadow_type != SH_type_l4_64_shadow )
> +    /* 32-bit PV guests don't own their l4 pages; see set_shadow_status */
> +    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
>          put_page(mfn_to_page(gmfn));
>  }
>  
> @@ -698,7 +698,7 @@ _sh_propagate(struct vcpu *v,
>      // PV guests in 64-bit mode use two different page tables for user vs
>      // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
>      // It is always shadowed as present...
> -    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
> +    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32bit_domain(d)
>           && is_pv_domain(d) )
>      {
>          sflags |= _PAGE_USER;
> @@ -1346,8 +1346,8 @@ do {                                    
>      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
>      {                                                                       \
>          if ( (!(_xen))                                                      \
> -             || !is_pv_32on64_domain(_dom)                                  \
> -             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
> +             || !is_pv_32bit_domain(_dom)                                   \
> +             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow    \
>               || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
>          {                                                                   \
>              (_sl2e) = _sp + _i;                                             \
> @@ -1435,7 +1435,7 @@ void sh_install_xen_entries_in_l4(struct
>          shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
>                              __PAGE_HYPERVISOR);
>  
> -    if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) &&
> +    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
>           !VM_ASSIST(d, m2p_strict) )
>      {
>          /* open coded zap_ro_mpt(mfn_x(sl4mfn)): */
> @@ -1475,7 +1475,7 @@ static void sh_install_xen_entries_in_l2
>  {
>      shadow_l2e_t *sl2e;
>  
> -    if ( !is_pv_32on64_domain(d) )
> +    if ( !is_pv_32bit_domain(d) )
>          return;
>  
>      sl2e = sh_map_domain_page(sl2hmfn);
> @@ -1620,9 +1620,9 @@ sh_make_monitor_table(struct vcpu *v)
>              l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR);
>              sh_unmap_domain_page(l3e);
>  
> -            if ( is_pv_32on64_domain(d) )
> +            if ( is_pv_32bit_domain(d) )
>              {
> -                /* For 32-on-64 PV guests, we need to map the 32-bit Xen
> +                /* For 32-bit PV guests, we need to map the 32-bit Xen
>                   * area into its usual VAs in the monitor tables */
>                  m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
>                  mfn_to_page(m3mfn)->shadow_flags = 3;
> @@ -1740,7 +1740,7 @@ static shadow_l2e_t * shadow_get_and_cre
>          unsigned int t = SH_type_l2_shadow;
>  
>          /* Tag compat L2 containing hypervisor (m2p) mappings */
> -        if ( is_pv_32on64_domain(v->domain) &&
> +        if ( is_pv_32bit_vcpu(v) &&
>               guest_l4_table_offset(gw->va) == 0 &&
>               guest_l3_table_offset(gw->va) == 3 )
>              t = SH_type_l2h_shadow;
> @@ -2043,7 +2043,7 @@ void sh_destroy_monitor_table(struct vcp
>          sh_unmap_domain_page(l3e);
>          shadow_free(d, m3mfn);
>  
> -        if ( is_pv_32on64_domain(d) )
> +        if ( is_pv_32bit_domain(d) )
>          {
>              /* Need to destroy the l3 and l2 monitor pages that map the
>               * Xen VAs at 3GB-4GB */
> @@ -3963,7 +3963,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
>                     (unsigned long)pagetable_get_pfn(v->arch.guest_table));
>  
>  #if GUEST_PAGING_LEVELS == 4
> -    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_domain(d) )
> +    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32bit_domain(d) )
>          gmfn = pagetable_get_mfn(v->arch.guest_table_user);
>      else
>  #endif
> @@ -4078,7 +4078,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
>      if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
>          flush_tlb_mask(d->domain_dirty_cpumask);
>      sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
> -    if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) )
> +    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
>      {
>          mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
>  
> @@ -5104,7 +5104,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
>              gmfn = get_shadow_status(d, get_gfn_query_unlocked(
>                                          d, gfn_x(gfn), &p2mt),
>                                       ((GUEST_PAGING_LEVELS == 3 ||
> -                                       is_pv_32on64_domain(d))
> +                                       is_pv_32bit_domain(d))
>                                        && !shadow_mode_external(d)
>                                        && (guest_index(gl3e) % 4) == 3)
>                                       ? SH_type_l2h_shadow
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -951,7 +951,7 @@ void pv_cpuid(struct cpu_user_regs *regs
>              __clear_bit(X86_FEATURE_LM % 32, &d);
>              __clear_bit(X86_FEATURE_LAHF_LM % 32, &c);
>          }
> -        if ( is_pv_32on64_domain(currd) &&
> +        if ( is_pv_32bit_domain(currd) &&
>               boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
>              __clear_bit(X86_FEATURE_SYSCALL % 32, &d);
>          __clear_bit(X86_FEATURE_PAGE1GB % 32, &d);
> @@ -3675,7 +3675,7 @@ long register_guest_nmi_callback(unsigne
>  
>      t->vector  = TRAP_nmi;
>      t->flags   = 0;
> -    t->cs      = (is_pv_32on64_domain(d) ?
> +    t->cs      = (is_pv_32bit_domain(d) ?
>                    FLAT_COMPAT_KERNEL_CS : FLAT_KERNEL_CS);
>      t->address = address;
>      TI_SET_IF(t, 1);
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -1188,7 +1188,7 @@ int handle_memadd_fault(unsigned long ad
>      unsigned long mfn, idle_index;
>      int ret = 0;
>  
> -    if (!is_pv_32on64_domain(d))
> +    if (!is_pv_32bit_domain(d))
>          return 0;
>  
>      if ( (addr < HYPERVISOR_COMPAT_VIRT_START(d)) ||
> @@ -1247,7 +1247,7 @@ unmap:
>  
>  void domain_set_alloc_bitsize(struct domain *d)
>  {
> -    if ( !is_pv_32on64_domain(d) ||
> +    if ( !is_pv_32bit_domain(d) ||
>           (MACH2PHYS_COMPAT_NR_ENTRIES(d) >= max_page) ||
>           d->arch.physaddr_bitsize > 0 )
>          return;
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -495,7 +495,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
>              break;
>  
>  #ifdef CONFIG_COMPAT
> -        if ( !is_pv_32on64_domain(d) )
> +        if ( !is_pv_32bit_domain(d) )
>              ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
>          else
>              ret = copy_from_guest(c.cmp,
> @@ -901,7 +901,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
>          vcpu_unpause(v);
>  
>  #ifdef CONFIG_COMPAT
> -        if ( !is_pv_32on64_domain(d) )
> +        if ( !is_pv_32bit_domain(d) )
>              ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
>          else
>              ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
> --- a/xen/common/kexec.c
> +++ b/xen/common/kexec.c
> @@ -872,7 +872,7 @@ static int kexec_load_slot(struct kexec_
>  static uint16_t kexec_load_v1_arch(void)
>  {
>  #ifdef CONFIG_X86
> -    return is_pv_32on64_domain(hardware_domain) ? EM_386 : EM_X86_64;
> +    return is_pv_32bit_domain(hardware_domain) ? EM_386 : EM_X86_64;
>  #else
>      return EM_NONE;
>  #endif
> --- a/xen/common/xenoprof.c
> +++ b/xen/common/xenoprof.c
> @@ -219,7 +219,7 @@ static int alloc_xenoprof_struct(
>      bufsize = sizeof(struct xenoprof_buf);
>      i = sizeof(struct event_log);
>  #ifdef CONFIG_COMPAT
> -    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? hardware_domain : d);
> +    d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d);
>      if ( XENOPROF_COMPAT(d->xenoprof) )
>      {
>          bufsize = sizeof(struct compat_oprof_buf);
> --- a/xen/include/asm-x86/desc.h
> +++ b/xen/include/asm-x86/desc.h
> @@ -65,7 +65,7 @@
>   */
>  #define guest_gate_selector_okay(d, sel)                                \
>      ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
> -     ((sel) == (!is_pv_32on64_domain(d) ?                               \
> +     ((sel) == (!is_pv_32bit_domain(d) ?                                \
>                  FLAT_KERNEL_CS :                /* Xen default seg? */  \
>                  FLAT_COMPAT_KERNEL_CS)) ||                              \
>       ((sel) & 4))                               /* LDT seg? */
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -14,7 +14,6 @@
>  #define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
>  #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
>  #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
> -#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
>  
>  #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
>          d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
> 
> 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/3] x86: drop is_pv_32on64_vcpu()
  2015-06-23 15:18 ` [PATCH 1/3] x86: drop is_pv_32on64_vcpu() Jan Beulich
  2015-06-23 15:39   ` Andrew Cooper
@ 2015-06-24 21:35   ` Boris Ostrovsky
  2015-06-24 23:49     ` Andrew Cooper
  1 sibling, 1 reply; 14+ messages in thread
From: Boris Ostrovsky @ 2015-06-24 21:35 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Andrew Cooper, Keir Fraser

On 06/23/2015 11:18 AM, Jan Beulich wrote:
> ... as being identical to is_pv_32bit_vcpu() after the x86-32 removal.
>
> In a few cases this includes an additional is_pv_32bit_vcpu() ->
> is_pv_32bit_domain() conversion.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

We have
struct arch_domain
{
     ...
     /* Is a 32-bit PV (non-HVM) guest? */
     bool_t is_32bit_pv;
     /* Is shared-info page in 32-bit format? */
     bool_t has_32bit_shinfo;
    ...
}

and currently both of these fields are set/unset together (except for 
one HVM case --- hvm_latch_shinfo_size()). Why not have a single 'bool 
is_32bit' and then replace macros at the top of include/asm-x86/domain.h 
with is_32bit_vcpu/domain()?

I think in majority of places when we test for is_pv_32bit_vcpu/domain() 
we already know that we are PV so it shouldn't add any additional tests.
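
Roughly, i.e. something like this (an illustrative sketch only, not a
tested patch):

struct arch_domain
{
    ...
    /* Is the guest (and its shared-info page) 32-bit? */
    bool_t is_32bit;
    ...
}

#define is_32bit_domain(d)  ((d)->arch.is_32bit)
#define is_32bit_vcpu(v)    (is_32bit_domain((v)->domain))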

-boris

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/3] x86: drop is_pv_32on64_vcpu()
  2015-06-24 21:35   ` Boris Ostrovsky
@ 2015-06-24 23:49     ` Andrew Cooper
  2015-06-25  1:08       ` Boris Ostrovsky
  0 siblings, 1 reply; 14+ messages in thread
From: Andrew Cooper @ 2015-06-24 23:49 UTC (permalink / raw)
  To: Boris Ostrovsky, Jan Beulich, xen-devel; +Cc: Keir Fraser

On 24/06/2015 22:35, Boris Ostrovsky wrote:
> On 06/23/2015 11:18 AM, Jan Beulich wrote:
>> ... as being identical to is_pv_32bit_vcpu() after the x86-32 removal.
>>
>> In a few cases this includes an additional is_pv_32bit_vcpu() ->
>> is_pv_32bit_domain() conversion.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> We have
> struct arch_domain
> {
>     ...
>     /* Is a 32-bit PV (non-HVM) guest? */
>     bool_t is_32bit_pv;
>     /* Is shared-info page in 32-bit format? */
>     bool_t has_32bit_shinfo;
>    ...
> }
>
> and currently both of these fields are set/unset together (except for
> one HVM case --- hvm_latch_shinfo_size()). Why not have a single 'bool
> is_32bit' and then replace macros at the top of
> include/asm-x86/domain.h with is_32bit_vcpu/domain()?
>
> I think in majority of places when we test for
> is_pv_32bit_vcpu/domain() we already know that we are PV so it
> shouldn't add any additional tests.

For the PV case, the two are equivalent.  For HVM, they are not.

HVM domains have shared info, but may be latched as either 32 or 64bit,
depending on the mode they were running in when they most recently wrote
a hypercall page.  Sadly, the shared info layout is width-dependent
which is why such hacks need to exist.
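
To make that concrete (a sketch only; has_32bit_shinfo() is the real
predicate, but the two helpers below are invented names): any code
touching an HVM guest's shared info has to branch on the latched width,

    if ( has_32bit_shinfo(d) )
        write_wallclock_compat(d);  /* 32-bit shared_info layout */
    else
        write_wallclock_native(d);  /* native 64-bit layout */

and hvm_latch_shinfo_size() exists to keep that flag in sync with the
mode the guest was last seen making hypercalls in.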

~Andrew

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/3] x86: drop is_pv_32on64_vcpu()
  2015-06-24 23:49     ` Andrew Cooper
@ 2015-06-25  1:08       ` Boris Ostrovsky
  2015-06-25  7:55         ` Jan Beulich
  0 siblings, 1 reply; 14+ messages in thread
From: Boris Ostrovsky @ 2015-06-25  1:08 UTC (permalink / raw)
  To: Andrew Cooper, Jan Beulich, xen-devel; +Cc: Keir Fraser

On 06/24/2015 07:49 PM, Andrew Cooper wrote:
> On 24/06/2015 22:35, Boris Ostrovsky wrote:
>> On 06/23/2015 11:18 AM, Jan Beulich wrote:
>>> ... as being identical to is_pv_32bit_vcpu() after the x86-32 removal.
>>>
>>> In a few cases this includes an additional is_pv_32bit_vcpu() ->
>>> is_pv_32bit_domain() conversion.
>>>
>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> We have
>> struct arch_domain
>> {
>>      ...
>>      /* Is a 32-bit PV (non-HVM) guest? */
>>      bool_t is_32bit_pv;
>>      /* Is shared-info page in 32-bit format? */
>>      bool_t has_32bit_shinfo;
>>     ...
>> }
>>
>> and currently both of these fields are set/unset together (except for
>> one HVM case --- hvm_latch_shinfo_size()). Why not have a single 'bool
>> is_32bit' and then replace macros at the top of
>> include/asm-x86/domain.h with is_32bit_vcpu/domain()?
>>
>> I think in majority of places when we test for
>> is_pv_32bit_vcpu/domain() we already know that we are PV so it
>> shouldn't add any additional tests.
> For the PV case, the two are equivalent.  For HVM, they are not.
>
> HVM domains have shared info, but may be latched as either 32 or 64bit,
> depending on the mode they were running in when they most recently wrote
> a hypercall page.  Sadly, the shared info layout is width-dependent
> which is why such hacks need to exist.

Why can't we latch the mode into the is_32bit field? I am essentially
suggesting that we drop is_32bit_pv and rename has_32bit_shinfo to
is_32bit. Then is_pv_32bit_vcpu() becomes '(is_pv_vcpu() &&
domain->is_32bit)' (or simply domain->is_32bit, depending on context)
and has_32bit_shinfo() becomes domain->is_32bit.
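
As a rough sketch (hypothetical, assuming the merged is_32bit field
above; not an actual patch), that would be something like:

    #define is_pv_32bit_domain(d)  (is_pv_domain(d) && (d)->arch.is_32bit)
    #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
    #define has_32bit_shinfo(d)    ((d)->arch.is_32bit)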

The reason I am asking is that for 32-bit PVH I will need to switch a
few places from using is_pv_32bit_vcpu() to has_32bit_shinfo(), and
that would look strange: asking whether the guest is 32-bit reads more
naturally than asking whether its shared info is 32-bit. At least it
does to my eye.

-boris

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/3] x86: drop is_pv_32on64_vcpu()
  2015-06-25  1:08       ` Boris Ostrovsky
@ 2015-06-25  7:55         ` Jan Beulich
  0 siblings, 0 replies; 14+ messages in thread
From: Jan Beulich @ 2015-06-25  7:55 UTC (permalink / raw)
  To: Boris Ostrovsky; +Cc: Andrew Cooper, Keir Fraser, xen-devel

>>> On 25.06.15 at 03:08, <boris.ostrovsky@oracle.com> wrote:
> On 06/24/2015 07:49 PM, Andrew Cooper wrote:
>> On 24/06/2015 22:35, Boris Ostrovsky wrote:
>>> On 06/23/2015 11:18 AM, Jan Beulich wrote:
>>>> ... as being identical to is_pv_32bit_vcpu() after the x86-32 removal.
>>>>
>>>> In a few cases this includes an additional is_pv_32bit_vcpu() ->
>>>> is_pv_32bit_domain() conversion.
>>>>
>>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>> We have
>>> struct arch_domain
>>> {
>>>      ...
>>>      /* Is a 32-bit PV (non-HVM) guest? */
>>>      bool_t is_32bit_pv;
>>>      /* Is shared-info page in 32-bit format? */
>>>      bool_t has_32bit_shinfo;
>>>     ...
>>> }
>>>
>>> and currently both of these fields are set/unset together (except for
>>> one HVM case --- hvm_latch_shinfo_size()). Why not have a single 'bool
>>> is_32bit' and then replace macros at the top of
>>> include/asm-x86/domain.h with is_32bit_vcpu/domain()?
>>>
>>> I think in majority of places when we test for
>>> is_pv_32bit_vcpu/domain() we already know that we are PV so it
>>> shouldn't add any additional tests.
>> For the PV case, the two are equivalent.  For HVM, they are not.
>>
>> HVM domains have shared info, but may be latched as either 32 or 64bit,
>> depending on the mode they were running in when they most recently wrote
>> a hypercall page.  Sadly, the shared info layout is width-dependent
>> which is why such hacks need to exist.
> 
> Why can't we latch the mode into is_32bit field? I am essentially 
> suggesting to drop is_32bit_pv and rename has_32bit_shinfo to is_32bit. 
> Then is_pv_32bit_vcpu() becomes '(is_pv_vcpu() && domain->is_32bit)' (or 
> simply domain->is_32bit, depending on context) and has_32bit_shinfo()  
> becomes domain->is_32bit.
> 
> The reason I am asking is because for the 32b PVH I will need to switch 
> a few places from using is_pv_32bit_vcpu() to has_32bit_shinfo() and 
> that would look strange: asking whether the guest is 32-bit looks more 
> natural than asking whether its shared info is 32-bit. At least it's 
> more natural to my eye.

But it's incorrect: Guest mode may change, and claiming a guest is
32-bit is valid only for PV (at least as soon as PVH is permitted to
switch between 32- and 64-bit mode). HVM guests (and by analogy
PVH ones) should have no general bitness associated with them
(which is also why the macro name has "pv" in it) - the mode they
currently execute in is variable, and we explicitly don't tie shared
info layout to their current execution mode.

Introducing arbitrary checks like you suggest (for anything other than
shared info accesses) would need to be temporary, i.e. tagged with a
fixme comment (of which, as said recently, I'd prefer not to see new
instances introduced in connection with PVH).

Jan

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 2/3] x86: drop is_pv_32on64_domain()
  2015-06-23 15:19 ` [PATCH 2/3] x86: drop is_pv_32on64_domain() Jan Beulich
  2015-06-23 15:43   ` Andrew Cooper
  2015-06-23 16:25   ` George Dunlap
@ 2015-07-06 16:01   ` Ian Campbell
  2 siblings, 0 replies; 14+ messages in thread
From: Ian Campbell @ 2015-07-06 16:01 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Keir Fraser, George Dunlap, Ian Jackson, Tim Deegan,
	Andrew Cooper, xen-devel

On Tue, 2015-06-23 at 16:19 +0100, Jan Beulich wrote:
> ... as being identical to is_pv_32bit_domain() after the x86-32
> removal.
> 
> In a few cases this includes no longer open-coding is_pv_32bit_vcpu().
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Given that the common-code impact is effectively x86-only due to the
ifdefs (CONFIG_COMPAT is effectively x86-only IMHO), I don't think you
really need it, but:

Acked-by: Ian Campbell <ian.campbell@citrix.com>

> --- a/xen/common/kexec.c
> +++ b/xen/common/kexec.c
> @@ -872,7 +872,7 @@ static int kexec_load_slot(struct kexec_
>  static uint16_t kexec_load_v1_arch(void)
>  {
>  #ifdef CONFIG_X86
> -    return is_pv_32on64_domain(hardware_domain) ? EM_386 : EM_X86_64;
> +    return is_pv_32bit_domain(hardware_domain) ? EM_386 : EM_X86_64;
>  #else
>      return EM_NONE;
>  #endif
> --- a/xen/common/xenoprof.c
> +++ b/xen/common/xenoprof.c
> @@ -219,7 +219,7 @@ static int alloc_xenoprof_struct(
>      bufsize = sizeof(struct xenoprof_buf);
>      i = sizeof(struct event_log);
>  #ifdef CONFIG_COMPAT
> -    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? hardware_domain : d);
> +    d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d);
>      if ( XENOPROF_COMPAT(d->xenoprof) )
>      {
>          bufsize = sizeof(struct compat_oprof_buf);

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2015-07-06 16:15 UTC | newest]

Thread overview: 14+ messages
2015-06-23 15:14 [PATCH 0/3] x86: further is_..._...() adjustments Jan Beulich
2015-06-23 15:18 ` [PATCH 1/3] x86: drop is_pv_32on64_vcpu() Jan Beulich
2015-06-23 15:39   ` Andrew Cooper
2015-06-24 21:35   ` Boris Ostrovsky
2015-06-24 23:49     ` Andrew Cooper
2015-06-25  1:08       ` Boris Ostrovsky
2015-06-25  7:55         ` Jan Beulich
2015-06-23 15:19 ` [PATCH 2/3] x86: drop is_pv_32on64_domain() Jan Beulich
2015-06-23 15:43   ` Andrew Cooper
2015-06-23 16:25   ` George Dunlap
2015-07-06 16:01   ` Ian Campbell
2015-06-23 15:20 ` [PATCH 3/3] x86/mm: use is_..._vcpu() instead of open coding it Jan Beulich
2015-06-23 15:45   ` Andrew Cooper
2015-06-23 16:13   ` George Dunlap
