* [PATCH 1/3] x86: properly handle vcpu-to-pcpu mask conversion
From: Jan Beulich @ 2009-06-10 14:36 UTC
  To: xen-devel

This is one of the prerequisites for extending the number of vCPUs the
hypervisor can support per guest.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
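---
The gist of the interface change: the helper goes from

    static inline cpumask_t vcpumask_to_pcpumask(
        struct domain *d, unsigned long vmask);

to

    static inline int vcpumask_to_pcpumask(
        struct domain *d, XEN_GUEST_HANDLE(const_void) bmap, cpumask_t *pmask);

i.e. it now fetches the vCPU bitmap from guest memory itself, one
guest-sized word at a time (BITS_PER_LONG bits for native guests, 32 for
32-on-64 ones), and returns 0 on success or -EFAULT (with *pmask
cleared) if the bitmap cannot be read. Callers thus no longer pre-copy a
single guest longword, which is what forced the removed
BUILD_BUG_ON(MAX_VIRT_CPUS > 32) and, with it, the 32-vCPU ceiling.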

--- 2009-06-10.orig/xen/arch/x86/mm.c	2009-06-05 11:59:48.000000000 +0200
+++ 2009-06-10/xen/arch/x86/mm.c	2009-06-10 10:46:17.000000000 +0200
@@ -2565,29 +2565,41 @@ static int set_foreigndom(domid_t domid)
     return okay;
 }
 
-static inline cpumask_t vcpumask_to_pcpumask(
-    struct domain *d, unsigned long vmask)
+static inline int vcpumask_to_pcpumask(
+    struct domain *d, XEN_GUEST_HANDLE(const_void) bmap, cpumask_t *pmask)
 {
-    unsigned int vcpu_id;
-    cpumask_t    pmask = CPU_MASK_NONE;
+    unsigned int vcpu_id, vcpu_bias, offs;
+    unsigned long vmask;
     struct vcpu *v;
+    bool_t is_native = !is_pv_32on64_domain(d);
 
-    /*
-     * Callers copy only a single guest-sized longword from the guest.
-     * This must be wide enough to reference all VCPUs. Worst case is 32 bits.
-     */
-    BUILD_BUG_ON(MAX_VIRT_CPUS > 32);
-
-    while ( vmask != 0 )
+    cpus_clear(*pmask);
+    for ( vmask = 0, offs = 0; ; ++offs )
     {
-        vcpu_id = find_first_set_bit(vmask);
-        vmask &= ~(1UL << vcpu_id);
-        if ( (vcpu_id < MAX_VIRT_CPUS) &&
-             ((v = d->vcpu[vcpu_id]) != NULL) )
-            cpus_or(pmask, pmask, v->vcpu_dirty_cpumask);
-    }
+        vcpu_bias = offs * (is_native ? BITS_PER_LONG : 32);
+        if ( vcpu_bias >= MAX_VIRT_CPUS )
+            return 0;
+
+        if ( unlikely(is_native ?
+                      copy_from_guest_offset(&vmask, bmap, offs, 1) :
+                      copy_from_guest_offset((unsigned int *)&vmask, bmap,
+                                             offs, 1)) )
+        {
+            cpus_clear(*pmask);
+            return -EFAULT;
+        }
 
-    return pmask;
+        while ( vmask )
+        {
+            vcpu_id = find_first_set_bit(vmask);
+            vmask &= ~(1UL << vcpu_id);
+            vcpu_id += vcpu_bias;
+            if ( vcpu_id >= MAX_VIRT_CPUS )
+                return 0;
+            if ( (v = d->vcpu[vcpu_id]) != NULL )
+                cpus_or(*pmask, *pmask, v->vcpu_dirty_cpumask);
+        }
+    }
 }
 
 #ifdef __i386__
@@ -2816,14 +2828,13 @@ int do_mmuext_op(
         case MMUEXT_TLB_FLUSH_MULTI:
         case MMUEXT_INVLPG_MULTI:
         {
-            unsigned long vmask;
             cpumask_t     pmask;
-            if ( unlikely(copy_from_guest(&vmask, op.arg2.vcpumask, 1)) )
+
+            if ( unlikely(vcpumask_to_pcpumask(d, op.arg2.vcpumask, &pmask)) )
             {
                 okay = 0;
                 break;
             }
-            pmask = vcpumask_to_pcpumask(d, vmask);
             if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
                 flush_tlb_mask(&pmask);
             else
@@ -3630,7 +3641,7 @@ int do_update_va_mapping(unsigned long v
     struct domain *d   = v->domain;
     struct page_info *gl1pg;
     l1_pgentry_t  *pl1e;
-    unsigned long  vmask, bmap_ptr, gl1mfn;
+    unsigned long  bmap_ptr, gl1mfn;
     cpumask_t      pmask;
     int            rc;
 
@@ -3682,11 +3693,9 @@ int do_update_va_mapping(unsigned long v
         default:
             if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_ALL_TLBS )
                 break;
-            if ( unlikely(!is_pv_32on64_domain(d) ?
-                          get_user(vmask, (unsigned long *)bmap_ptr) :
-                          get_user(vmask, (unsigned int *)bmap_ptr)) )
-                rc = -EFAULT, vmask = 0;
-            pmask = vcpumask_to_pcpumask(d, vmask);
+            rc = vcpumask_to_pcpumask(d, const_guest_handle_from_ptr(bmap_ptr,
+                                                                     void),
+                                      &pmask);
             if ( cpu_isset(smp_processor_id(), pmask) )
                 this_cpu(percpu_mm_info).deferred_ops &= ~DOP_FLUSH_TLB;
             flush_tlb_mask(&pmask);
@@ -3710,11 +3719,9 @@ int do_update_va_mapping(unsigned long v
             flush_tlb_one_mask(&d->domain_dirty_cpumask, va);
             break;
         default:
-            if ( unlikely(!is_pv_32on64_domain(d) ?
-                          get_user(vmask, (unsigned long *)bmap_ptr) :
-                          get_user(vmask, (unsigned int *)bmap_ptr)) )
-                rc = -EFAULT, vmask = 0;
-            pmask = vcpumask_to_pcpumask(d, vmask);
+            rc = vcpumask_to_pcpumask(d, const_guest_handle_from_ptr(bmap_ptr,
+                                                                     void),
+                                      &pmask);
             if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_TLB )
                 cpu_clear(smp_processor_id(), pmask);
             flush_tlb_one_mask(&pmask, va);
--- 2009-06-10.orig/xen/arch/x86/x86_64/compat/mm.c	2009-01-15 10:02:31.000000000 +0100
+++ 2009-06-10/xen/arch/x86/x86_64/compat/mm.c	2009-06-10 10:46:17.000000000 +0200
@@ -278,18 +278,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
             }
 
 #define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
-            do \
-            { \
-                unsigned int vcpumask; \
-                if ( i < --limit ) \
-                { \
-                    (_d_)->arg2.vcpumask.p = (void *)(nat_ops.p + limit); \
-                    if ( copy_from_compat(&vcpumask, (_s_)->arg2.vcpumask, 1) == 0 ) \
-                        *(unsigned long *)(_d_)->arg2.vcpumask.p = vcpumask; \
-                    else \
-                        rc = -EFAULT; \
-                } \
-            } while(0)
+        guest_from_compat_handle((_d_)->arg2.vcpumask, (_s_)->arg2.vcpumask)
             XLAT_mmuext_op(nat_op, &cmp_op);
 #undef XLAT_mmuext_op_HNDL_arg2_vcpumask
 
--- 2009-06-10.orig/xen/include/asm-x86/guest_access.h	2008-09-08 12:57:38.000000000 +0200
+++ 2009-06-10/xen/include/asm-x86/guest_access.h	2009-06-10 10:46:17.000000000 +0200
@@ -45,6 +45,8 @@
 
 #define guest_handle_from_ptr(ptr, type)        \
     ((XEN_GUEST_HANDLE(type)) { (type *)ptr })
+#define const_guest_handle_from_ptr(ptr, type)  \
+    ((XEN_GUEST_HANDLE(const_##type)) { (const type *)ptr })
 
 /*
  * Copy an array of objects to guest context via a guest handle,

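For context, the guest-visible bitmap format is unchanged: bit N of the
vcpumask argument stands for vCPU N. What changes is that the hypervisor
now reads as many guest words of the bitmap as MAX_VIRT_CPUS requires,
rather than only the first one. A hypothetical guest-side sketch
(Linux-style hypercall wrappers assumed; note that for a 32-bit guest
bit 47 lives in the second word, which was previously never read, and
that vCPU IDs beyond 32 additionally need the later patch raising
MAX_VIRT_CPUS):

    unsigned long vmask[2] = { 0, 0 };
    struct mmuext_op op;

    __set_bit(0, vmask);                  /* flush vCPU 0 ... */
    __set_bit(47, vmask);                 /* ... and vCPU 47 */
    op.cmd = MMUEXT_TLB_FLUSH_MULTI;
    set_xen_guest_handle(op.arg2.vcpumask, vmask);
    HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);

The compat_mmuext_op() hunk falls out of this naturally: since the
helper now understands 32-bit guest words itself, the 32-on-64
translation layer no longer has to widen the mask into a scratch native
longword appended past the translated ops, and can simply forward the
guest handle via guest_from_compat_handle().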

* Re: [PATCH 1/3] x86: properly handle vcpu-to-pcpu mask conversion
From: Keir Fraser @ 2009-06-10 16:48 UTC
  To: Jan Beulich, xen-devel

On 10/06/2009 17:36, "Jan Beulich" <JBeulich@novell.com> wrote:

> This is one of the prerequisites for extending the number of vCPUs the
> hypervisor can support per guest.
> 
> Signed-off-by: Jan Beulich <jbeulich@novell.com>

Okay, I didn't see patch 0/3 that summarises the overall approach, nor patch
3/3 which would actually make greater numbers of VCPUs usable. Obviously
that is the most interesting patch since it is a change, or at least an
extension, of the guest interface. Also it's what makes patches 1/3 and 2/3
actually useful.

 -- Keir

* Re: [PATCH 1/3] x86: properly handle vcpu-to-pcpu mask conversion
From: Jan Beulich @ 2009-06-15  7:20 UTC
  To: Keir Fraser; +Cc: xen-devel

>>> Keir Fraser <keir.fraser@eu.citrix.com> 10.06.09 18:48 >>>
>On 10/06/2009 17:36, "Jan Beulich" <JBeulich@novell.com> wrote:
>
>> This is one of the prerequisites for extending the number of vCPUs the
>> hypervisor can support per guest.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@novell.com>
>
>Okay, I didn't see patch 0/3 that summarises the overall approach, nor patch

I didn't send a summary, as there was nothing that really needed summarizing.

>3/3 which would actually make greater numbers of VCPUs usable. Obviously
>that is the most interesting patch since it is a change, or at least an
>extension, of the guest interface. Also it's what makes patches 1/3 and 2/3
>actually useful.

A glitch of the list server? I see the mail on the xen-devel archives. And I
know that I frequently get mails that the list server appears to have delayed
for hours or even days. But if you still didn't get it, I can certainly re-send
it. Just let me know...

Jan

* Re: [PATCH 1/3] x86: properly handle vcpu-to-pcpu mask conversion
From: Keir Fraser @ 2009-06-15  7:42 UTC
  To: Jan Beulich; +Cc: xen-devel

On 15/06/2009 08:20, "Jan Beulich" <JBeulich@novell.com> wrote:

>> 3/3 which would actually make greater numbers of VCPUs usable. Obviously
>> that is the most interesting patch since it is a change, or at least an
>> extension, of the guest interface. Also it's what makes patches 1/3 and 2/3
>> actually useful.
> 
> A glitch of the list server? I see the mail on the xen-devel archives. And I
> know that I frequently get mails that the list server appears to have delayed
> for hours or even days. But if you still didn't get it, I can certainly
> re-send
> it. Just let me know...

Found it. I seem to have a random spam filter.

 -- Keir
